:>    Well, this has turned into a rather sticky little problem.  I've
:>    spent all day going through the vnode/name-cache reclaim code, looking
:>    both at Seigo's cache_purgeleafdirs() and my own patch.
:
:   Can you forward me your patch? I'd like to try it out on some machines in
:the TSI lab.

    Absolutely, any and all testing is good.

    Here's the patch for stable and current.  Stable first:

Index: kern/vfs_subr.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/vfs_subr.c,v
retrieving revision 1.249.2.11
diff -u -r1.249.2.11 vfs_subr.c
--- kern/vfs_subr.c     2001/09/11 09:49:53     1.249.2.11
+++ kern/vfs_subr.c     2001/09/23 07:33:51
@@ -506,10 +506,12 @@
                                TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                                TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist);
                                continue;
+#if 0
                        } else if (LIST_FIRST(&vp->v_cache_src)) {
                                /* Don't recycle if active in the namecache */
                                simple_unlock(&vp->v_interlock);
                                continue;
+#endif
                        } else {
                                break;
                        }


     And here is the patch for current:

Index: kern/vfs_cache.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/vfs_cache.c,v
retrieving revision 1.61
diff -u -r1.61 vfs_cache.c
--- kern/vfs_cache.c    2001/09/12 08:37:46     1.61
+++ kern/vfs_cache.c    2001/09/23 07:27:05
@@ -101,8 +101,10 @@
 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
 static u_long  numcachehv;             /* number of cache entries with vnodes held */
 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
+#if 0
 static u_long  numcachepl;             /* number of cache purge for leaf entries */
 SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
+#endif
 struct nchstats nchstats;              /* cache effectiveness statistics */
 
 static int     doingcache = 1;         /* 1 => enable the cache */
@@ -499,6 +501,8 @@
        }
 }
 
+#if 0
+
 /*
  * Flush all dirctory entries with no child directories held in
  * the cache.
@@ -554,6 +558,8 @@
        }
        numcachepl++;
 }
+
+#endif
 
 /*
  * Perform canonical checks and cache lookup and pass on to filesystem
Index: kern/vfs_subr.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/vfs_subr.c,v
retrieving revision 1.319
diff -u -r1.319 vfs_subr.c
--- kern/vfs_subr.c     2001/09/12 08:37:47     1.319
+++ kern/vfs_subr.c     2001/09/22 20:15:11
@@ -110,6 +110,8 @@
 /* Number of vnodes in the free list. */
 static u_long freevnodes = 0;
 SYSCTL_LONG(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
+
+#if 0
 /* Number of vnode allocation. */
 static u_long vnodeallocs = 0;
 SYSCTL_LONG(_debug, OID_AUTO, vnodeallocs, CTLFLAG_RD, &vnodeallocs, 0, "");
@@ -125,6 +127,7 @@
 /* Number of vnodes attempted to recycle at a time. */
 static u_long vnoderecyclenumber = 3000;
 static u_long vnoderecyclenumber = 3000;
+SYSCTL_LONG(_debug, OID_AUTO, vnoderecyclenumber, CTLFLAG_RW, &vnoderecyclenumber, 0, "");
+#endif
 
 /*
  * Various variables used for debugging the new implementation of
@@ -556,8 +559,13 @@
                 * Don't recycle if active in the namecache or
                 * if it still has cached pages or we cannot get
                 * its interlock.
+                *
+                * XXX the namei cache can hold onto vnodes too long,
+                * causing us to run out of MALLOC space.  Instead, we 
+                * should make path lookups requeue any vnodes on the free
+                * list.
                 */
-               if (LIST_FIRST(&vp->v_cache_src) != NULL ||
+               if (/* LIST_FIRST(&vp->v_cache_src) != NULL || */
                    (VOP_GETVOBJECT(vp, &object) == 0 &&
                     (object->resident_page_count || object->ref_count)) ||
                    !mtx_trylock(&vp->v_interlock)) {
@@ -636,6 +644,7 @@
 
        vfs_object_create(vp, td, td->td_proc->p_ucred);
 
+#if 0
        vnodeallocs++;
        if (vnodeallocs % vnoderecycleperiod == 0 &&
            freevnodes < vnoderecycleminfreevn &&
@@ -643,6 +652,7 @@
                /* Recycle vnodes. */
                cache_purgeleafdirs(vnoderecyclenumber);
        }
+#endif
 
        return (0);
 }
Index: sys/vnode.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/vnode.h,v
retrieving revision 1.157
diff -u -r1.157 vnode.h
--- sys/vnode.h 2001/09/13 22:52:42     1.157
+++ sys/vnode.h 2001/09/23 07:26:54
@@ -559,7 +559,6 @@
            struct componentname *cnp));
 void   cache_purge __P((struct vnode *vp));
 void   cache_purgevfs __P((struct mount *mp));
-void   cache_purgeleafdirs __P((int ndir));
 void   cvtstat __P((struct stat *st, struct ostat *ost));
 void   cvtnstat __P((struct stat *sb, struct nstat *nsb));
 int    getnewvnode __P((enum vtagtype tag,

To Unsubscribe: send mail to [EMAIL PROTECTED]
with "unsubscribe freebsd-hackers" in the body of the message

Reply via email to