Module Name:    src
Committed By:   riastradh
Date:           Sat Apr  9 23:52:23 UTC 2022

Modified Files:
        src/sys/kern: uipc_socket.c uipc_socket2.c uipc_usrreq.c
        src/sys/sys: socketvar.h

Log Message:
unix(4): Convert membar_exit to membar_release.

Use atomic_load_consume or atomic_load_relaxed where necessary.

Comment on why unlocked nonatomic access is valid where it is done.
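
A minimal sketch of the release/consume pairing being adopted, for
readers unfamiliar with the new names (illustrative only, not the
committed code; the allocation call and IPL are plausible placeholders):

	/* Publisher: order the lock's initialization before the store. */
	kmutex_t *lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	membar_release();	/* pairs with atomic_load_consume below */
	so->so_lock = lock;	/* publish the pointer */

	/* Consumer: the dependent load orders access to the mutex. */
	kmutex_t *l = atomic_load_consume(&so->so_lock);
	mutex_enter(l);		/* sees a fully constructed mutex */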


To generate a diff of this commit:
cvs rdiff -u -r1.301 -r1.302 src/sys/kern/uipc_socket.c
cvs rdiff -u -r1.140 -r1.141 src/sys/kern/uipc_socket2.c
cvs rdiff -u -r1.201 -r1.202 src/sys/kern/uipc_usrreq.c
cvs rdiff -u -r1.164 -r1.165 src/sys/sys/socketvar.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/uipc_socket.c
diff -u src/sys/kern/uipc_socket.c:1.301 src/sys/kern/uipc_socket.c:1.302
--- src/sys/kern/uipc_socket.c:1.301	Sat Mar 12 16:06:15 2022
+++ src/sys/kern/uipc_socket.c	Sat Apr  9 23:52:22 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: uipc_socket.c,v 1.301 2022/03/12 16:06:15 riastradh Exp $	*/
+/*	$NetBSD: uipc_socket.c,v 1.302 2022/04/09 23:52:22 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.301 2022/03/12 16:06:15 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.302 2022/04/09 23:52:22 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -539,6 +539,10 @@ socreate(int dom, struct socket **aso, i
 	 * the lock with another socket, e.g. socketpair(2) case.
 	 */
 	if (lockso) {
+		/*
+		 * lockso->so_lock should be stable at this point, so
+		 * no need for atomic_load_*.
+		 */
 		lock = lockso->so_lock;
 		so->so_lock = lock;
 		mutex_obj_hold(lock);

Index: src/sys/kern/uipc_socket2.c
diff -u src/sys/kern/uipc_socket2.c:1.140 src/sys/kern/uipc_socket2.c:1.141
--- src/sys/kern/uipc_socket2.c:1.140	Sat Oct  2 02:07:41 2021
+++ src/sys/kern/uipc_socket2.c	Sat Apr  9 23:52:23 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: uipc_socket2.c,v 1.140 2021/10/02 02:07:41 thorpej Exp $	*/
+/*	$NetBSD: uipc_socket2.c,v 1.141 2022/04/09 23:52:23 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -58,7 +58,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.140 2021/10/02 02:07:41 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.141 2022/04/09 23:52:23 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -134,7 +134,7 @@ __KERNEL_RCSID(0, "$NetBSD: uipc_socket2
  *
  * o In order to mutate so_lock, the lock pointed to by the current value
  *   of so_lock must be held: i.e., the socket must be held locked by the
- *   changing thread.  The thread must issue membar_exit() to prevent
+ *   changing thread.  The thread must issue membar_release() to prevent
  *   memory accesses being reordered, and can set so_lock to the desired
  *   value.  If the lock pointed to by the new value of so_lock is not
  *   held by the changing thread, the socket must then be considered
@@ -324,6 +324,9 @@ sonewconn(struct socket *head, bool sore
 	/*
 	 * Share the lock with the listening-socket, it may get unshared
 	 * once the connection is complete.
+	 *
+	 * so_lock is stable while we hold the socket locked, so no
+	 * need for atomic_load_* here.
 	 */
 	mutex_obj_hold(head->so_lock);
 	so->so_lock = head->so_lock;
@@ -537,7 +540,7 @@ sbwait(struct sockbuf *sb)
 		error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
 	else
 		error = cv_timedwait_sig(&sb->sb_cv, lock, sb->sb_timeo);
-	if (__predict_false(lock != so->so_lock))
+	if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
 		solockretry(so, lock);
 	return error;
 }
@@ -589,6 +592,8 @@ sowakeup(struct socket *so, struct sockb
  * Reset a socket's lock pointer.  Wake all threads waiting on the
  * socket's condition variables so that they can restart their waits
  * using the new lock.  The existing lock must be held.
+ *
+ * Caller must have issued membar_release before this.
  */
 void
 solockreset(struct socket *so, kmutex_t *lock)
@@ -1464,9 +1469,9 @@ void
 solockretry(struct socket *so, kmutex_t *lock)
 {
 
-	while (lock != so->so_lock) {
+	while (lock != atomic_load_relaxed(&so->so_lock)) {
 		mutex_exit(lock);
-		lock = so->so_lock;
+		lock = atomic_load_consume(&so->so_lock);
 		mutex_enter(lock);
 	}
 }
@@ -1475,6 +1480,10 @@ bool
 solocked(const struct socket *so)
 {
 
+	/*
+	 * Used only for diagnostic assertions, so so_lock should be
+	 * stable at this point, hence no need for atomic_load_*.
+	 */
 	return mutex_owned(so->so_lock);
 }
 
@@ -1483,6 +1492,10 @@ solocked2(const struct socket *so1, cons
 {
 	const kmutex_t *lock;
 
+	/*
+	 * Used only for diagnostic assertions, so so_lock should be
+	 * stable at this point, hence no need for atomic_load_*.
+	 */
 	lock = so1->so_lock;
 	if (lock != so2->so_lock)
 		return false;
@@ -1533,7 +1546,7 @@ sblock(struct sockbuf *sb, int wf)
 			error = 0;
 		} else
 			error = cv_wait_sig(&so->so_cv, lock);
-		if (__predict_false(lock != so->so_lock))
+		if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
 			solockretry(so, lock);
 		if (error != 0)
 			return error;
@@ -1568,7 +1581,7 @@ sowait(struct socket *so, bool catch_p, 
 		error = cv_timedwait_sig(&so->so_cv, lock, timo);
 	else
 		error = cv_timedwait(&so->so_cv, lock, timo);
-	if (__predict_false(lock != so->so_lock))
+	if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
 		solockretry(so, lock);
 	return error;
 }

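A condensed sketch of the so_lock handoff contract documented in the
comment above (hypothetical caller; "newlock" is illustrative, and
solockreset() is the real publication step):

	KASSERT(mutex_owned(so->so_lock));	/* hold the current lock */
	membar_release();		/* order prior writes before handoff */
	solockreset(so, newlock);	/* store new so_lock, wake waiters */
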
Index: src/sys/kern/uipc_usrreq.c
diff -u src/sys/kern/uipc_usrreq.c:1.201 src/sys/kern/uipc_usrreq.c:1.202
--- src/sys/kern/uipc_usrreq.c:1.201	Sun Aug  8 20:54:48 2021
+++ src/sys/kern/uipc_usrreq.c	Sat Apr  9 23:52:23 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: uipc_usrreq.c,v 1.201 2021/08/08 20:54:48 nia Exp $	*/
+/*	$NetBSD: uipc_usrreq.c,v 1.202 2022/04/09 23:52:23 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2000, 2004, 2008, 2009, 2020 The NetBSD Foundation, Inc.
@@ -96,7 +96,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.201 2021/08/08 20:54:48 nia Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.202 2022/04/09 23:52:23 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -291,7 +291,12 @@ unp_setpeerlocks(struct socket *so, stru
 	lock = unp->unp_streamlock;
 	unp->unp_streamlock = NULL;
 	mutex_obj_hold(lock);
-	membar_exit();
+	/*
+	 * Ensure lock is initialized before publishing it with
+	 * solockreset.  Pairs with atomic_load_consume in solock and
+	 * various loops to reacquire lock after wakeup.
+	 */
+	membar_release();
 	/*
 	 * possible race if lock is not held - see comment in
 	 * uipc_usrreq(PRU_ACCEPT).

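One of the "loops to reacquire lock after wakeup" that this release
pairs with, condensed from sbwait() in uipc_socket2.c above:

	kmutex_t *lock = so->so_lock;		/* held on entry */
	error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
	if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
		solockretry(so, lock);	/* so_lock changed while we slept */
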
Index: src/sys/sys/socketvar.h
diff -u src/sys/sys/socketvar.h:1.164 src/sys/sys/socketvar.h:1.165
--- src/sys/sys/socketvar.h:1.164	Sat Oct 23 01:28:33 2021
+++ src/sys/sys/socketvar.h	Sat Apr  9 23:52:23 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: socketvar.h,v 1.164 2021/10/23 01:28:33 thorpej Exp $	*/
+/*	$NetBSD: socketvar.h,v 1.165 2022/04/09 23:52:23 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
@@ -74,6 +74,7 @@ struct uio;
 struct lwp;
 struct uidinfo;
 #else
+#include <sys/atomic.h>
 #include <sys/uidinfo.h>
 #endif
 
@@ -520,12 +521,12 @@ solock(struct socket *so)
 {
 	kmutex_t *lock;
 
-	lock = so->so_lock;
+	lock = atomic_load_consume(&so->so_lock);
 	mutex_enter(lock);
-	if (__predict_false(lock != so->so_lock))
+	if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
 		solockretry(so, lock);
 }
-	
+
 static __inline void
 sounlock(struct socket *so)
 {

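Why solock() rechecks after mutex_enter(), as a hypothetical
interleaving (thread B could be, e.g., unp_setpeerlocks() above):

	/*
	 * A: lock = atomic_load_consume(&so->so_lock);   (old lock)
	 *                             B: membar_release();
	 *                             B: so->so_lock = newlock;
	 * A: mutex_enter(lock);       B: mutex_exit(lock);
	 * A: lock != so->so_lock, so A calls solockretry() and chases
	 *    so_lock until the mutex it holds is the current one.
	 */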