Module Name:    src
Committed By:   thorpej
Date:           Fri Aug  5 05:20:39 UTC 2022

Modified Files:
        src/sys/kern: vfs_vnode.c
        src/sys/sys: vnode.h

Log Message:
In vcache_reclaim(), post NOTE_REVOKE immediately after changing the
vnode state to VS_RECLAIMING and before calling VOP_RECLAIM().  For a
stacked FS vnode, VOP_RECLAIM() releases the reference on the lower
node, which is likely to free the upper node's v_klist backing store.

Acquire the vnode interlock when checking for kevent interest, now that
the vp->v_klist pointer can change out from under the caller.

PR kern/56950
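
The resulting order of operations at reclaim time is sketched below.
This is a condensed illustration of the vfs_vnode.c change in the diff
that follows, not the literal source; surrounding code and error
handling are omitted.

	/* vp->v_interlock is held at this point. */
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);

	/* Post NOTE_REVOKE while vp->v_klist is still valid. */
	KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);

	/* Stop sharing a klist; fall back to the vnode's own storage. */
	vp->v_klist = &vip->vi_klist;
	mutex_exit(vp->v_interlock);

	/*
	 * Only now is VOP_RECLAIM() called.  For a stacked file system
	 * this may drop the last reference on the lower vnode and free
	 * the klist the upper vnode had been sharing.
	 */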


To generate a diff of this commit:
cvs rdiff -u -r1.144 -r1.145 src/sys/kern/vfs_vnode.c
cvs rdiff -u -r1.302 -r1.303 src/sys/sys/vnode.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/vfs_vnode.c
diff -u src/sys/kern/vfs_vnode.c:1.144 src/sys/kern/vfs_vnode.c:1.145
--- src/sys/kern/vfs_vnode.c:1.144	Mon Jul 18 04:30:30 2022
+++ src/sys/kern/vfs_vnode.c	Fri Aug  5 05:20:39 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_vnode.c,v 1.144 2022/07/18 04:30:30 thorpej Exp $	*/
+/*	$NetBSD: vfs_vnode.c,v 1.145 2022/08/05 05:20:39 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
@@ -148,7 +148,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.144 2022/07/18 04:30:30 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.145 2022/08/05 05:20:39 thorpej Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_pax.h"
@@ -1455,8 +1455,7 @@ vcache_free(vnode_impl_t *vip)
 	mutex_obj_free(vp->v_interlock);
 	rw_destroy(&vip->vi_lock);
 	uvm_obj_destroy(&vp->v_uobj, true);
-	KASSERT(vp->v_klist == &vip->vi_klist ||
-		SLIST_EMPTY(&vip->vi_klist.vk_klist));
+	KASSERT(vp->v_klist == &vip->vi_klist);
 	klist_fini(&vip->vi_klist.vk_klist);
 	cv_destroy(&vp->v_cv);
 	cache_vnode_fini(vp);
@@ -1830,6 +1829,19 @@ vcache_reclaim(vnode_t *vp)
 	 * while we clean it out.
 	 */
 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
+
+	/*
+	 * Send NOTE_REVOKE now, before we call VOP_RECLAIM(),
+	 * because VOP_RECLAIM() could cause vp->v_klist to
+	 * become invalid.  Don't check for interest in NOTE_REVOKE
+	 * here; it's always posted because it sets EV_EOF.
+	 *
+	 * Once it's been posted, reset vp->v_klist to point to
+	 * our own local storage, in case we were sharing with
+	 * someone else.
+	 */
+	KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);
+	vp->v_klist = &vip->vi_klist;
 	mutex_exit(vp->v_interlock);
 
 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
@@ -1916,11 +1928,6 @@ vcache_reclaim(vnode_t *vp)
 	vp->v_op = dead_vnodeop_p;
 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
 	vp->v_tag = VT_NON;
-	/*
-	 * Don't check for interest in NOTE_REVOKE; it's always posted
-	 * because it sets EV_EOF.
-	 */
-	KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);
 	mutex_exit(vp->v_interlock);
 
 	/*

Index: src/sys/sys/vnode.h
diff -u src/sys/sys/vnode.h:1.302 src/sys/sys/vnode.h:1.303
--- src/sys/sys/vnode.h:1.302	Mon Jul 18 04:30:30 2022
+++ src/sys/sys/vnode.h	Fri Aug  5 05:20:39 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: vnode.h,v 1.302 2022/07/18 04:30:30 thorpej Exp $	*/
+/*	$NetBSD: vnode.h,v 1.303 2022/08/05 05:20:39 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2020 The NetBSD Foundation, Inc.
@@ -427,24 +427,26 @@ void vref(struct vnode *);
 /*
  * Macro to determine kevent interest on a vnode.
  */
-#define	VN_KEVENT_INTEREST(vp, n)					\
+#define	_VN_KEVENT_INTEREST(vp, n)					\
 	(((vp)->v_klist->vk_interest & (n)) != 0)
 
+static inline bool
+VN_KEVENT_INTEREST(struct vnode *vp, long hint)
+{
+	mutex_enter(vp->v_interlock);
+	bool rv = _VN_KEVENT_INTEREST(vp, hint);
+	mutex_exit(vp->v_interlock);
+	return rv;
+}
+
 static inline void
 VN_KNOTE(struct vnode *vp, long hint)
 {
-	/*
-	 * Testing for klist interest unlocked is fine here, as registering
-	 * interest in vnode events is inherently racy with respect to other
-	 * system activity anyway.  Having knotes attached to vnodes is
-	 * actually incredibly rare, so we want to void having to take locks,
-	 * etc. in what is the overwhelmingly common case.
-	 */
-	if (__predict_false(VN_KEVENT_INTEREST(vp, hint))) {
-		mutex_enter(vp->v_interlock);
+	mutex_enter(vp->v_interlock);
+	if (__predict_false(_VN_KEVENT_INTEREST(vp, hint))) {
 		knote(&vp->v_klist->vk_klist, hint);
-		mutex_exit(vp->v_interlock);
 	}
+	mutex_exit(vp->v_interlock);
 }
 
 void	vn_knote_attach(struct vnode *, struct knote *);

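For callers nothing changes at the source level; a file system still
posts vnode events the same way.  A minimal, hypothetical usage sketch
(not part of this commit):

	/*
	 * Hypothetical caller, for illustration only: posting an event
	 * after a write.  With this change, the interest check inside
	 * VN_KNOTE() is performed under vp->v_interlock, so vp->v_klist
	 * cannot be switched by vcache_reclaim() while it is examined.
	 */
	VN_KNOTE(vp, NOTE_WRITE | NOTE_EXTEND);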