First, we don't need to grab and ungrab the lock just to check
getnewvnode's error status. No race there.

Second, since we're already prepared to deal with losing a race and
retrying, we can move the blocking pool_get operation out of the lock,
too. Fewer pileups in the event the code does block.
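For illustration, here is a minimal sketch of that allocate-outside-the-lock
pattern with a plain pthread mutex and a made-up cache (node_get and
cache_lookup_locked are hypothetical names, not the nfs_node code): do the
blocking allocation with no lock held, retake the lock, re-check for a racing
insert, and throw the allocation away if we lost.

	#include <pthread.h>
	#include <stdlib.h>

	struct node { int key; struct node *next; };

	static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *cache_head;

	/* caller must hold cache_lock */
	static struct node *
	cache_lookup_locked(int key)
	{
		struct node *n;

		for (n = cache_head; n != NULL; n = n->next)
			if (n->key == key)
				return (n);
		return (NULL);
	}

	struct node *
	node_get(int key)
	{
		struct node *n, *new;

	loop:
		pthread_mutex_lock(&cache_lock);
		n = cache_lookup_locked(key);
		pthread_mutex_unlock(&cache_lock);
		if (n != NULL)
			return (n);

		/* blocking allocation happens outside the lock */
		new = calloc(1, sizeof(*new));
		if (new == NULL)
			return (NULL);

		pthread_mutex_lock(&cache_lock);
		if (cache_lookup_locked(key) != NULL) {
			/* lost the race: undo the allocation and retry */
			pthread_mutex_unlock(&cache_lock);
			free(new);
			goto loop;
		}
		new->key = key;
		new->next = cache_head;
		cache_head = new;
		pthread_mutex_unlock(&cache_lock);
		return (new);
	}

The diff below applies the same idea to nfs_nget: pool_get moves ahead of
rw_enter_write(&nfs_hashlock), and the lost-race path frees the spare node
before retrying.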

Index: nfs_node.c
===================================================================
RCS file: /cvs/src/sys/nfs/nfs_node.c,v
retrieving revision 1.59
diff -u -p -r1.59 nfs_node.c
--- nfs_node.c  16 Dec 2014 18:30:04 -0000      1.59
+++ nfs_node.c  19 Dec 2014 00:15:18 -0000
@@ -118,23 +118,27 @@ loop:
         */
        rw_exit_write(&nfs_hashlock);
        error = getnewvnode(VT_NFS, mnt, &nfs_vops, &nvp);
-       /* note that we don't have this vnode set up completely yet */
-       rw_enter_write(&nfs_hashlock);
        if (error) {
                *npp = NULL;
-               rw_exit_write(&nfs_hashlock);
                return (error);
        }
+       /* grab one of these too while we're outside the lock */
+       np2 = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
+
+       /* note that we don't have this vnode set up completely yet */
+       rw_enter_write(&nfs_hashlock);
        nvp->v_flag |= VLARVAL;
        np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
+       /* lost race. undo and repeat */
        if (np != NULL) {
+               pool_put(&nfs_node_pool, np2);
                vgone(nvp);
                rw_exit_write(&nfs_hashlock);
                goto loop;
        }
 
        vp = nvp;
-       np = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
+       np = np2;
        vp->v_data = np;
        /* we now have an nfsnode on this vnode */
        vp->v_flag &= ~VLARVAL;
