deadfs cleanup

chkvnlock() is useless, since deadfs vops are only ever assigned
to a vnode at the tail end of vclean(), at which point the VXLOCK
has been cleared and won't be taken again for this particular
vnode until it is re-used through getnewvnode().
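
For context, the helper being removed amounts to the sketch below
(paraphrased, not a verbatim copy of the tree; the sleep primitive
and wait-channel name are my best guess at the current form):

/*
 * Sketch of the removed chkvnlock(): wait while the vnode is being
 * cleaned (VXLOCK set) and report whether we had to sleep.  Since
 * dead_vops is only installed after VXLOCK has been dropped, this
 * loop can never block for a deadfs vnode, so the callers' checks
 * are dead code.
 */
int
chkvnlock(struct vnode *vp)
{
	int locked = 0;

	mtx_enter(&vnode_mtx);
	while (vp->v_lflag & VXLOCK) {
		vp->v_lflag |= VXWANT;
		msleep_nsec(vp, &vnode_mtx, PINOD, "chkvnlock", INFSLP);
		locked = 1;
	}
	mtx_leave(&vnode_mtx);

	return (locked);
}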
As a bonus, LK_DRAIN can soon retire as well.

While here, juggle the mtx enter/leave and the knote at the tail
of vclean() for sanity.
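
To make the reordering easier to follow, the tail of vclean() reads
roughly like this with the diff applied (reconstructed from the hunk
below; do_wakeup is declared earlier in the function, and the mutex
and vops identifiers are assumed to be vnode_mtx and dead_vops):

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	mtx_enter(&vnode_mtx);
	vp->v_op = &dead_vops;
	vp->v_tag = VT_NON;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	vp->v_lflag &= ~VXLOCK;
	if (vp->v_lflag & VXWANT) {
		vp->v_lflag &= ~VXWANT;
		do_wakeup = 1;
	}
	mtx_leave(&vnode_mtx);

	/* The knote and the wakeup are delivered outside the mutex. */
	VN_KNOTE(vp, NOTE_REVOKE);

	if (do_wakeup)
		wakeup(vp);
}
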
diff --git sys/kern/vfs_subr.c sys/kern/vfs_subr.c
index 650fe5b61a2..425b6871cdd 100644
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -1051,7 +1051,7 @@ vclean(struct vnode *vp, int flags, struct proc *p)
* For active vnodes, it ensures that no other activity can
* occur while the underlying object is being cleaned out.
*/
- VOP_LOCK(vp, LK_EXCLUSIVE | LK_DRAIN);
+ VOP_LOCK(vp, LK_EXCLUSIVE);
/*
* Clean out any VM data associated with the vnode.
@@ -1099,19 +1099,21 @@ vclean(struct vnode *vp, int flags, struct proc *p)
/*
* Done with purge, notify sleepers of the grim news.
*/
+ mtx_enter(&vnode_mtx);
vp->v_op = &dead_vops;
- VN_KNOTE(vp, NOTE_REVOKE);
vp->v_tag = VT_NON;
#ifdef VFSLCKDEBUG
vp->v_flag &= ~VLOCKSWORK;
#endif
- mtx_enter(&vnode_mtx);
vp->v_lflag &= ~VXLOCK;
if (vp->v_lflag & VXWANT) {
vp->v_lflag &= ~VXWANT;
do_wakeup = 1;
}
mtx_leave(&vnode_mtx);
+
+ VN_KNOTE(vp, NOTE_REVOKE);
+
if (do_wakeup)
wakeup(vp);
}
diff --git sys/miscfs/deadfs/dead_vnops.c sys/miscfs/deadfs/dead_vnops.c
index 9711f1618be..44496815567 100644
--- sys/miscfs/deadfs/dead_vnops.c
+++ sys/miscfs/deadfs/dead_vnops.c
@@ -49,16 +49,10 @@ int dead_ebadf(void *);
int dead_open(void *);
int dead_read(void *);
int dead_write(void *);
-int dead_ioctl(void *);
int dead_kqfilter(void *v);
-int dead_inactive(void *);
-int dead_lock(void *);
-int dead_bmap(void *);
int dead_strategy(void *);
int dead_print(void *);
-int chkvnlock(struct vnode *);
-
const struct vops dead_vops = {
.vop_lookup = vop_generic_lookup,
.vop_create = vop_generic_badop,
@@ -70,7 +64,7 @@ const struct vops dead_vops = {
.vop_setattr = dead_ebadf,
.vop_read = dead_read,
.vop_write = dead_write,
- .vop_ioctl = dead_ioctl,
+ .vop_ioctl = nullop,
.vop_kqfilter = dead_kqfilter,
.vop_revoke = NULL,
.vop_fsync = nullop,
@@ -83,12 +77,12 @@ const struct vops dead_vops = {
.vop_readdir = dead_ebadf,
.vop_readlink = dead_ebadf,
.vop_abortop = vop_generic_badop,
- .vop_inactive = dead_inactive,
+ .vop_inactive = nullop,
.vop_reclaim = nullop,
- .vop_lock = dead_lock,
+ .vop_lock = nullop,
.vop_unlock = nullop,
.vop_islocked = nullop,
- .vop_bmap = dead_bmap,
+ .vop_bmap = nullop,
.vop_strategy = dead_strategy,
.vop_print = dead_print,
.vop_pathconf = dead_ebadf,
@@ -105,50 +99,25 @@ dead_open(void *v)
return (ENXIO);
}
-/*
- * Vnode op for read
- */
int
dead_read(void *v)
{
struct vop_read_args *ap = v;
- if (chkvnlock(ap->a_vp))
- panic("dead_read: lock");
/*
-* Return EOF for tty devices, EIO for others
-*/
+ * Return EOF for tty devices, EIO for others
+ */
if ((ap->a_vp->v_flag & VISTTY) == 0)
return (EIO);
return (0);
}
-/*
- * Vnode op for write
- */
int
dead_write(void *v)
{
- struct vop_write_args *ap = v;
-
- if (chkvnlock(ap->a_vp))
- panic("dead_write: lock");
return (EIO);
}
-/*
- * Device ioctl operation.
- */
-int
-dead_ioctl(void *v)
-{
- struct vop_ioctl_args *ap = v;
-
- if (!chkvnlock(ap->a_vp))
- return (EBADF);
- return ((ap->a_vp->v_op->vop_ioctl)(ap));
-}
-
int
dead_kqfilter(void *v)
{
@@ -180,51 +149,11 @@ dead_strategy(void *v)
struct vop_strategy_args *ap = v;
int s;
- if (ap->a_bp->b_vp == NULL || !chkvnlock(ap->a_bp->b_vp)) {
- ap->a_bp->b_flags |= B_ERROR;
- s = splbio();
- biodone(ap->a_bp);
- splx(s);
- return (EIO);
- }
- return (VOP_STRATEGY(ap->a_bp->b_vp, ap->a_bp));
-}
-
-int
-dead_inactive(void *v)
-{
- struct vop_inactive_args *ap = v;
-
- VOP_UNLOCK(ap->a_vp);
- return (0);
-}
-
-/*
- * Wait until the vnode has finished changing state.
- */
-int
-dead_lock(void *v)
-{
- struct vop_lock_args *ap = v;
- struct vnode *vp = ap->a_vp;
-
- if (ap->a_flags & LK_DRAIN || !chkvnlock(vp))
- return (0);
-
- return VOP_LOCK(vp, ap->a_flags);
-}
-
-/*
- * Wait until the vnode has