Module Name:    src
Committed By:   hannken
Date:           Fri Jan 27 10:50:10 UTC 2017

Modified Files:
        src/sys/kern: vfs_mount.c vfs_vnode.c

Log Message:
Vrecycle() cannot wait for the vnode lock.  On a leaf file system taking the
lock without waiting will always succeed, as we hold the last reference and
prevent further references.  On layered file systems, waiting for the lock
would open a can of deadlocks, as the lower vnodes may have other active
references.


To generate a diff of this commit:
cvs rdiff -u -r1.46 -r1.47 src/sys/kern/vfs_mount.c
cvs rdiff -u -r1.72 -r1.73 src/sys/kern/vfs_vnode.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/vfs_mount.c
diff -u src/sys/kern/vfs_mount.c:1.46 src/sys/kern/vfs_mount.c:1.47
--- src/sys/kern/vfs_mount.c:1.46	Fri Jan 27 10:46:18 2017
+++ src/sys/kern/vfs_mount.c	Fri Jan 27 10:50:10 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_mount.c,v 1.46 2017/01/27 10:46:18 hannken Exp $	*/
+/*	$NetBSD: vfs_mount.c,v 1.47 2017/01/27 10:50:10 hannken Exp $	*/
 
 /*-
  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.46 2017/01/27 10:46:18 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.47 2017/01/27 10:50:10 hannken Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -551,30 +551,35 @@ vflush(struct mount *mp, vnode_t *skipvp
 {
 	vnode_t *vp;
 	struct vnode_iterator *marker;
-	int busy, error, when;
+	int busy, error, when, retries = 2;
 
-	busy = error = when = 0;
+	do {
+		busy = error = when = 0;
 
-	/* First, flush out any vnode references from deferred vrele list. */
-	vfs_drainvnodes();
+		/*
+		 * First, flush out any vnode references from the
+		 * deferred vrele list.
+		 */
+		vfs_drainvnodes();
 
-	vfs_vnode_iterator_init(mp, &marker);
+		vfs_vnode_iterator_init(mp, &marker);
 
-	while ((vp = vflushnext(marker, &when)) != NULL) {
-		error = vflush_one(vp, skipvp, flags);
-		if (error == EBUSY) {
-			error = 0;
-			busy++;
+		while ((vp = vflushnext(marker, &when)) != NULL) {
+			error = vflush_one(vp, skipvp, flags);
+			if (error == EBUSY) {
+				error = 0;
+				busy++;
 #ifdef DEBUG
-			if (busyprt)
-				vprint("vflush: busy vnode", vp);
+				if (busyprt && retries == 0)
+					vprint("vflush: busy vnode", vp);
 #endif
-		} else if (error != 0) {
-			break;
+			} else if (error != 0) {
+				break;
+			}
 		}
-	}
 
-	vfs_vnode_iterator_destroy(marker);
+		vfs_vnode_iterator_destroy(marker);
+	} while (error == 0 && busy > 0 && retries-- > 0);
 
 	if (error)
 		return error;
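
For readability, here is the resulting retry structure of vflush() with the
hunk above applied, reconstructed from the added and context lines.  This is
only a sketch: the signature is filled in from the hunk header and the use of
"flags" in the body, and the unchanged remainder of the function is elided.
Busy vnodes now cause the whole pass to be repeated up to two extra times,
and the DEBUG busy-vnode report is printed only on the final pass:

int
vflush(struct mount *mp, vnode_t *skipvp, int flags)
{
	vnode_t *vp;
	struct vnode_iterator *marker;
	int busy, error, when, retries = 2;

	do {
		busy = error = when = 0;

		/* Flush out any vnode references from the deferred vrele list. */
		vfs_drainvnodes();

		vfs_vnode_iterator_init(mp, &marker);

		while ((vp = vflushnext(marker, &when)) != NULL) {
			error = vflush_one(vp, skipvp, flags);
			if (error == EBUSY) {
				/* Still referenced: count it and retry on the next pass. */
				error = 0;
				busy++;
#ifdef DEBUG
				/* Report busy vnodes only on the last pass. */
				if (busyprt && retries == 0)
					vprint("vflush: busy vnode", vp);
#endif
			} else if (error != 0) {
				break;
			}
		}

		vfs_vnode_iterator_destroy(marker);
	} while (error == 0 && busy > 0 && retries-- > 0);

	if (error)
		return error;
	/* ... remainder of the function is unchanged ... */
}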

Index: src/sys/kern/vfs_vnode.c
diff -u src/sys/kern/vfs_vnode.c:1.72 src/sys/kern/vfs_vnode.c:1.73
--- src/sys/kern/vfs_vnode.c:1.72	Wed Jan 11 09:08:58 2017
+++ src/sys/kern/vfs_vnode.c	Fri Jan 27 10:50:10 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_vnode.c,v 1.72 2017/01/11 09:08:58 hannken Exp $	*/
+/*	$NetBSD: vfs_vnode.c,v 1.73 2017/01/27 10:50:10 hannken Exp $	*/
 
 /*-
  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
@@ -156,7 +156,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.72 2017/01/11 09:08:58 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.73 2017/01/27 10:50:10 hannken Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -872,12 +872,22 @@ vrecycle(vnode_t *vp)
 	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
 	mutex_exit(vp->v_interlock);
 
-	error = vn_lock(vp, LK_EXCLUSIVE);
-	KASSERT(error == 0);
+	/*
+	 * On a leaf file system this lock will always succeed as we hold
+	 * the last reference and prevent further references.
+	 * On layered file systems waiting for the lock would open a can of
+	 * deadlocks as the lower vnodes may have other active references.
+	 */
+	error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
 
 	mutex_enter(vp->v_interlock);
 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
 
+	if (error) {
+		mutex_exit(vp->v_interlock);
+		return false;
+	}
+
 	KASSERT(vp->v_usecount == 1);
 	vcache_reclaim(vp);
 	vrelel(vp, 0);
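
Likewise, the tail of vrecycle() with the hunk above applied, reconstructed
as a sketch.  The vnode lock is now only tried, never waited for, so a
contended lock makes vrecycle() fail gracefully instead of asserting; the
trailing "return true" is an assumption based on the bool result implied by
the new error path and is not part of the hunk above:

	/* Block further references while we attempt to reclaim. */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	/*
	 * Try-lock only.  On a leaf file system this cannot fail, since
	 * we hold the last reference and prevent new ones.  On a layered
	 * file system a sleeping lock could deadlock against other active
	 * references to the lower vnode.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);

	if (error) {
		/* Lock was contended: give up and let the caller cope. */
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vp->v_usecount == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0);

	return true;	/* assumed, not shown in the hunk */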
