Module Name:    src
Committed By:   riastradh
Date:           Sun Dec 19 12:36:24 UTC 2021

Modified Files:
        src/sys/external/bsd/drm2/linux: linux_ww_mutex.c

Log Message:
drm: Spruce up ww_mutex comments.  Audit return values.


To generate a diff of this commit:
cvs rdiff -u -r1.9 -r1.10 src/sys/external/bsd/drm2/linux/linux_ww_mutex.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/linux/linux_ww_mutex.c
diff -u src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.9 src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.10
--- src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.9	Sun Dec 19 11:21:20 2021
+++ src/sys/external/bsd/drm2/linux/linux_ww_mutex.c	Sun Dec 19 12:36:24 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: linux_ww_mutex.c,v 1.9 2021/12/19 11:21:20 riastradh Exp $	*/
+/*	$NetBSD: linux_ww_mutex.c,v 1.10 2021/12/19 12:36:24 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.9 2021/12/19 11:21:20 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.10 2021/12/19 12:36:24 riastradh Exp $");
 
 #include <sys/types.h>
 #include <sys/atomic.h>
@@ -168,6 +168,13 @@ static lockops_t ww_lockops = {
 };
 #endif
 
+/*
+ * ww_mutex_init(mutex, class)
+ *
+ *	Initialize mutex in the given class.  Must precede any other
+ *	ww_mutex_* operations.  When done, mutex must be destroyed
+ *	with ww_mutex_destroy.
+ */
 void
 ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
 {
@@ -187,6 +194,13 @@ ww_mutex_init(struct ww_mutex *mutex, st
 #endif
 }
 
+/*
+ * ww_mutex_destroy(mutex)
+ *
+ *	Destroy mutex initialized by ww_mutex_init.  Caller must not
+ *	use mutex with any other ww_mutex_* operations afterward,
+ *	except after reinitializing it with ww_mutex_init.
+ */
 void
 ww_mutex_destroy(struct ww_mutex *mutex)
 {
@@ -205,9 +219,15 @@ ww_mutex_destroy(struct ww_mutex *mutex)
 }
 
 /*
- * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
- * mean `Do I hold this lock?' (answering which really requires an
- * acquire context).
+ * ww_mutex_is_locked(mutex)
+ *
+ *	True if anyone holds mutex locked at the moment, false if not.
+ *	Answer is stale as soon as it is returned, unless mutex is
+ *	held by the caller.
+ *
+ *	XXX WARNING: This returns true if it is locked by ANYONE.  Does
+ *	not mean `Do I hold this lock?' (answering which really
+ *	requires an acquire context).
  */
 bool
 ww_mutex_is_locked(struct ww_mutex *mutex)
@@ -233,33 +253,78 @@ ww_mutex_is_locked(struct ww_mutex *mute
 	return locked;
 }
 
+/*
+ * ww_mutex_state_wait(mutex, state)
+ *
+ *	Wait for mutex, which must be in the given state, to transition
+ *	to another state.  Uninterruptible; never fails.
+ *
+ *	Caller must hold mutex's internal lock.
+ *
+ *	May sleep.
+ *
+ *	Internal subroutine.
+ */
 static void
 ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
 {
 
+	KASSERT(mutex_owned(&mutex->wwm_lock));
 	KASSERT(mutex->wwm_state == state);
 	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
 	while (mutex->wwm_state == state);
 }
 
+/*
+ * ww_mutex_state_wait_sig(mutex, state)
+ *
+ *	Wait for mutex, which must be in the given state, to transition
+ *	to another state, or fail if interrupted by a signal.  Return 0
+ *	on success, -EINTR if interrupted by a signal.
+ *
+ *	Caller must hold mutex's internal lock.
+ *
+ *	May sleep.
+ *
+ *	Internal subroutine.
+ */
 static int
 ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
 {
 	int ret;
 
+	KASSERT(mutex_owned(&mutex->wwm_lock));
 	KASSERT(mutex->wwm_state == state);
 	do {
 		/* XXX errno NetBSD->Linux */
 		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
-		if (ret == -ERESTART)
-			ret = -ERESTARTSYS;
-		if (ret)
+		if (ret) {
+			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
+			    "ret=%d", ret);
+			ret = -EINTR;
 			break;
+		}
 	} while (mutex->wwm_state == state);
 
+	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 	return ret;
 }
 
+/*
+ * ww_mutex_lock_wait(mutex, ctx)
+ *
+ *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
+ *	by another thread with an acquire context, wait to acquire
+ *	mutex.  While waiting, record ctx in the tree of waiters.  Does
+ *	not update the mutex state otherwise.
+ *
+ *	Caller must not already hold mutex.  Caller must hold mutex's
+ *	internal lock.  Uninterruptible; never fails.
+ *
+ *	May sleep.
+ *
+ *	Internal subroutine.
+ */
 static void
 ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
@@ -291,6 +356,22 @@ ww_mutex_lock_wait(struct ww_mutex *mute
 	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
 }
 
+/*
+ * ww_mutex_lock_wait_sig(mutex, ctx)
+ *
+ *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
+ *	by another thread with an acquire context, wait to acquire
+ *	mutex and return 0, or return -EINTR if interrupted by a
+ *	signal.  While waiting, record ctx in the tree of waiters.
+ *	Does not update the mutex state otherwise.
+ *
+ *	Caller must not already hold mutex.  Caller must hold mutex's
+ *	internal lock.
+ *
+ *	May sleep.
+ *
+ *	Internal subroutine.
+ */
 static int
 ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
@@ -318,18 +399,31 @@ ww_mutex_lock_wait_sig(struct ww_mutex *
 	do {
 		/* XXX errno NetBSD->Linux */
 		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
-		if (ret == -ERESTART)
-			ret = -ERESTARTSYS;
-		if (ret)
+		if (ret) {
+			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
+			    "ret=%d", ret);
+			ret = -EINTR;
 			goto out;
+		}
 	} while (!(((mutex->wwm_state == WW_CTX) ||
 		    (mutex->wwm_state == WW_WANTOWN)) &&
 		(mutex->wwm_u.ctx == ctx)));
 
 out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
+	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 	return ret;
 }
 
+/*
+ * ww_mutex_lock_noctx(mutex)
+ *
+ *	Acquire mutex without an acquire context.  Caller must not
+ *	already hold the mutex.  Uninterruptible; never fails.
+ *
+ *	May sleep.
+ *
+ *	Internal subroutine, implementing ww_mutex_lock(..., NULL).
+ */
 static void
 ww_mutex_lock_noctx(struct ww_mutex *mutex)
 {
@@ -364,6 +458,18 @@ retry:	switch (mutex->wwm_state) {
 	mutex_exit(&mutex->wwm_lock);
 }
 
+/*
+ * ww_mutex_lock_noctx_sig(mutex)
+ *
+ *	Acquire mutex without an acquire context and return 0, or fail
+ *	and return -EINTR if interrupted by a signal.  Caller must not
+ *	already hold the mutex.
+ *
+ *	May sleep.
+ *
+ *	Internal subroutine, implementing
+ *	ww_mutex_lock_interruptible(..., NULL).
+ */
 static int
 ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
 {
@@ -379,8 +485,10 @@ retry:	switch (mutex->wwm_state) {
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
-		if (ret)
+		if (ret) {
+			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 			goto out;
+		}
 		goto retry;
 	case WW_CTX:
 		KASSERT(mutex->wwm_u.ctx != NULL);
@@ -390,8 +498,10 @@ retry:	switch (mutex->wwm_state) {
 		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
-		if (ret)
+		if (ret) {
+			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 			goto out;
+		}
 		goto retry;
 	default:
 		panic("wait/wound mutex %p in bad state: %d",
@@ -402,12 +512,29 @@ retry:	switch (mutex->wwm_state) {
 	WW_LOCKED(mutex);
 	ret = 0;
 out:	mutex_exit(&mutex->wwm_lock);
+	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 	return ret;
 }
 
+/*
+ * ww_mutex_lock(mutex, ctx)
+ *
+ *	Lock the mutex and return 0, or fail if impossible.
+ *
+ *	- If ctx is null, caller must not hold mutex, and ww_mutex_lock
+ *	  always succeeds and returns 0.
+ *
+ *	- If ctx is nonnull, then:
+ *	  . Fail with -EALREADY if caller already holds mutex.
+ *	  . Fail with -EDEADLK if someone else holds mutex and
+ *	    waiting for it would risk a deadlock cycle.
+ *
+ *	May sleep.
+ */
 int
 ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
+	int ret;
 
 	/*
 	 * We do not WW_WANTLOCK at the beginning because we may
@@ -419,7 +546,8 @@ ww_mutex_lock(struct ww_mutex *mutex, st
 	if (ctx == NULL) {
 		WW_WANTLOCK(mutex);
 		ww_mutex_lock_noctx(mutex);
-		return 0;
+		ret = 0;
+		goto out;
 	}
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
@@ -466,8 +594,8 @@ retry:	switch (mutex->wwm_state) {
 		 * for objects whose locking order is determined by
 		 * userland.
 		 */
-		mutex_exit(&mutex->wwm_lock);
-		return -EALREADY;
+		ret = -EALREADY;
+		goto out_unlock;
 	}
 
 	/*
@@ -484,8 +612,8 @@ retry:	switch (mutex->wwm_state) {
 		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
 		    "ww mutex class mismatch: %p != %p",
 		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
-		mutex_exit(&mutex->wwm_lock);
-		return -EDEADLK;
+		ret = -EDEADLK;
+		goto out_unlock;
 	}
 
 	/*
@@ -499,10 +627,31 @@ locked:	KASSERT((mutex->wwm_state == WW_
 	KASSERT(mutex->wwm_u.ctx == ctx);
 	WW_LOCKED(mutex);
 	ctx->wwx_acquired++;
+	ret = 0;
+out_unlock:
 	mutex_exit(&mutex->wwm_lock);
-	return 0;
+out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
+	    "ret=%d", ret);
+	return ret;
 }
 
+/*
+ * ww_mutex_lock_interruptible(mutex, ctx)
+ *
+ *	Lock the mutex and return 0, or fail if impossible or
+ *	interrupted.
+ *
+ *	- If ctx is null, caller must not hold mutex;
+ *	  ww_mutex_lock_interruptible returns 0 on success or
+ *	  -EINTR if interrupted by a signal.
+ *
+ *	- If ctx is nonnull, then:
+ *	  . Fail with -EALREADY if caller already holds mutex.
+ *	  . Fail with -EDEADLK if someone else holds mutex and
+ *	    waiting for it would risk a deadlock cycle.
+ *	  . Fail with -EINTR if interrupted by a signal.
+ *
+ *	May sleep.
+ */
 int
 ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
@@ -517,7 +666,9 @@ ww_mutex_lock_interruptible(struct ww_mu
 
 	if (ctx == NULL) {
 		WW_WANTLOCK(mutex);
-		return ww_mutex_lock_noctx_sig(mutex);
+		ret = ww_mutex_lock_noctx_sig(mutex);
+		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
+		goto out;
 	}
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
@@ -542,15 +693,19 @@ retry:	switch (mutex->wwm_state) {
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
-		if (ret)
-			goto out;
+		if (ret) {
+			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+			goto out_unlock;
+		}
 		goto retry;
 	case WW_CTX:
 		break;
 	case WW_WANTOWN:
 		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
-		if (ret)
-			goto out;
+		if (ret) {
+			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+			goto out_unlock;
+		}
 		goto retry;
 	default:
 		panic("wait/wound mutex %p in bad state: %d",
@@ -568,8 +723,8 @@ retry:	switch (mutex->wwm_state) {
 		 * for objects whose locking order is determined by
 		 * userland.
 		 */
-		mutex_exit(&mutex->wwm_lock);
-		return -EALREADY;
+		ret = -EALREADY;
+		goto out_unlock;
 	}
 
 	/*
@@ -586,8 +741,8 @@ retry:	switch (mutex->wwm_state) {
 		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
 		    "ww mutex class mismatch: %p != %p",
 		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
-		mutex_exit(&mutex->wwm_lock);
-		return -EDEADLK;
+		ret = -EDEADLK;
+		goto out_unlock;
 	}
 
 	/*
@@ -595,8 +750,10 @@ retry:	switch (mutex->wwm_state) {
 	 * when it is done or it realizes it needs to back off.
 	 */
 	ret = ww_mutex_lock_wait_sig(mutex, ctx);
-	if (ret)
-		goto out;
+	if (ret) {
+		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+		goto out_unlock;
+	}
 
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
@@ -604,10 +761,25 @@ locked:	KASSERT((mutex->wwm_state == WW_
 	WW_LOCKED(mutex);
 	ctx->wwx_acquired++;
 	ret = 0;
-out:	mutex_exit(&mutex->wwm_lock);
+out_unlock:
+	mutex_exit(&mutex->wwm_lock);
+out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
+		ret == -EINTR), "ret=%d", ret);
 	return ret;
 }
 
+/*
+ * ww_mutex_lock_slow(mutex, ctx)
+ *
+ *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
+ *	after the caller has ditched all its locks, wait for the owner
+ *	of mutex to relinquish mutex before the caller can start over
+ *	acquiring locks again.
+ *
+ *	Uninterruptible; never fails.
+ *
+ *	May sleep.
+ */
 void
 ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
@@ -674,6 +846,17 @@ locked:	KASSERT((mutex->wwm_state == WW_
 	mutex_exit(&mutex->wwm_lock);
 }
 
+/*
+ * ww_mutex_lock_slow_interruptible(mutex, ctx)
+ *
+ *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
+ *	after the caller has ditched all its locks, wait for the owner
+ *	of mutex to relinquish mutex before the caller can start over
+ *	acquiring locks again, or fail with -EINTR if interrupted by a
+ *	signal.
+ *
+ *	May sleep.
+ */
 int
 ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
     struct ww_acquire_ctx *ctx)
@@ -683,8 +866,11 @@ ww_mutex_lock_slow_interruptible(struct 
 	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
 
-	if (ctx == NULL)
-		return ww_mutex_lock_noctx_sig(mutex);
+	if (ctx == NULL) {
+		ret = ww_mutex_lock_noctx_sig(mutex);
+		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
+		goto out;
+	}
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
@@ -709,15 +895,19 @@ retry:	switch (mutex->wwm_state) {
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
-		if (ret)
-			goto out;
+		if (ret) {
+			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+			goto out_unlock;
+		}
 		goto retry;
 	case WW_CTX:
 		break;
 	case WW_WANTOWN:
 		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
-		if (ret)
-			goto out;
+		if (ret) {
+			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+			goto out_unlock;
+		}
 		goto retry;
 	default:
 		panic("wait/wound mutex %p in bad state: %d",
@@ -734,8 +924,10 @@ retry:	switch (mutex->wwm_state) {
 	 * wake us when it's done.
 	 */
 	ret = ww_mutex_lock_wait_sig(mutex, ctx);
-	if (ret)
-		goto out;
+	if (ret) {
+		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+		goto out_unlock;
+	}
 
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
@@ -743,10 +935,18 @@ locked:	KASSERT((mutex->wwm_state == WW_
 	WW_LOCKED(mutex);
 	ctx->wwx_acquired++;
 	ret = 0;
-out:	mutex_exit(&mutex->wwm_lock);
+out_unlock:
+	mutex_exit(&mutex->wwm_lock);
+out:	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 	return ret;
 }
 
+/*
+ * ww_mutex_trylock(mutex)
+ *
+ *	Try to acquire mutex and return 1, but if it can't be done
+ *	immediately, return 0.
+ */
 int
 ww_mutex_trylock(struct ww_mutex *mutex)
 {
@@ -791,6 +991,17 @@ ww_mutex_trylock(struct ww_mutex *mutex)
 	return ret;
 }
 
+/*
+ * ww_mutex_unlock_release(mutex)
+ *
+ *	Decrement the count of locks acquired in mutex's current
+ *	acquire context, which must be held by the caller in the
+ *	WW_CTX or WW_WANTOWN state, and clear mutex's reference to
+ *	that context.
+ *	Caller must hold the internal lock of mutex, and is responsible
+ *	for notifying waiters.
+ *
+ *	Internal subroutine.
+ */
 static void
 ww_mutex_unlock_release(struct ww_mutex *mutex)
 {
@@ -808,6 +1019,11 @@ ww_mutex_unlock_release(struct ww_mutex 
 	mutex->wwm_u.ctx = NULL;
 }
 
+/*
+ * ww_mutex_unlock(mutex)
+ *
+ *	Release mutex and wake the next waiter, if any.
+ */
 void
 ww_mutex_unlock(struct ww_mutex *mutex)
 {
@@ -848,6 +1064,12 @@ ww_mutex_unlock(struct ww_mutex *mutex)
 	mutex_exit(&mutex->wwm_lock);
 }
 
+/*
+ * ww_mutex_locking_ctx(mutex)
+ *
+ *	Return the current acquire context of mutex.  Answer is stale
+ *	as soon as returned unless mutex is held by caller.
+ */
 struct ww_acquire_ctx *
 ww_mutex_locking_ctx(struct ww_mutex *mutex)
 {

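For readers following the newly documented protocol, here is a minimal
sketch of the -EDEADLK backoff dance that the ww_mutex_lock and
ww_mutex_lock_slow comments above describe, written against the
Linux-style ww_mutex API this file emulates.  The class, the lock
names, and lock_both itself are hypothetical names for illustration;
they are not part of this change:

	/*
	 * Hypothetical example: acquire two ww_mutexes in a
	 * caller-chosen order, backing off and retrying on -EDEADLK
	 * as documented above.  Caller passes an initialized acquire
	 * context and holds neither lock on entry.
	 */
	static void
	lock_both(struct ww_mutex *a, struct ww_mutex *b,
	    struct ww_acquire_ctx *ctx)
	{
		struct ww_mutex *held = a, *wanted = b, *tmp;
		int ret;

		/* Take the first lock; on -EDEADLK we hold nothing,
		 * so just wait it out via the slow path. */
		ret = ww_mutex_lock(held, ctx);
		if (ret == -EDEADLK)
			ww_mutex_lock_slow(held, ctx);

		for (;;) {
			ret = ww_mutex_lock(wanted, ctx);
			if (ret == 0)
				break;
			/*
			 * -EDEADLK: ditch what we hold, take the
			 * contended lock first via the slow path,
			 * then go after the other one again.
			 */
			ww_mutex_unlock(held);
			ww_mutex_lock_slow(wanted, ctx);
			tmp = held; held = wanted; wanted = tmp;
		}
	}

A caller would wrap this in an acquire context, e.g. (demo_class,
lock_x, and lock_y again hypothetical):

	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_class);
	lock_both(&lock_x, &lock_y, &ctx);
	ww_acquire_done(&ctx);
	/* ... use both resources ... */
	ww_mutex_unlock(&lock_x);
	ww_mutex_unlock(&lock_y);
	ww_acquire_fini(&ctx);

The interruptible variants follow the same shape, except that
ww_mutex_lock_interruptible and ww_mutex_lock_slow_interruptible may
also fail with -EINTR, which the caller must handle by releasing
everything it holds and propagating the error.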