Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=65de5567564e70edd01b6d4e95e548d7ba284872
Commit:     65de5567564e70edd01b6d4e95e548d7ba284872
Parent:     c2f828977ba5d17c13debba374ea252d18e5ccfb
Author:     David Chinner <[EMAIL PROTECTED]>
AuthorDate: Thu Aug 16 15:21:11 2007 +1000
Committer:  Tim Shimmin <[EMAIL PROTECTED]>
CommitDate: Mon Sep 17 16:42:02 2007 +1000

    [XFS] On-demand reaping of the MRU cache
    
    Instead of running the MRU cache reaper all the time based on a timeout,
    only run it when the cache has active objects. This allows CPUs to sleep
    when there is no activity rather than being woken repeatedly just to
    check whether there is anything to do.
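
    A minimal sketch of the resulting life cycle, condensed from the hunks
    below (locking elided; field and helper names are taken from the patch):
    
        /* insert path: arm the reaper only when the first object arrives */
        if (!mru->queued) {
                mru->queued = 1;
                queue_delayed_work(xfs_mru_reap_wq, &mru->work,
                                   mru->grp_count * mru->grp_time);
        }
    
        /* reap path: re-arm only while objects remain, otherwise stay idle */
        next = _xfs_mru_cache_migrate(mru, jiffies);
        mru->queued = next;
        if (mru->queued) {
                now = jiffies;
                next = (next <= now) ? 0 : next - now;
                queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
        }
    
    xfs_mru_cache_flush() now just cancels any queued work and reaps every
    list itself, so the old restart/reap_all machinery is no longer needed.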
    
    SGI-PV: 968554
    SGI-Modid: xfs-linux-melb:xfs-kern:29305a
    
    Signed-off-by: David Chinner <[EMAIL PROTECTED]>
    Signed-off-by: Donald Douwsma <[EMAIL PROTECTED]>
    Signed-off-by: Tim Shimmin <[EMAIL PROTECTED]>
---
 fs/xfs/xfs_filestream.c |    3 +-
 fs/xfs/xfs_mru_cache.c  |   72 ++++++++++++++++++----------------------------
 fs/xfs/xfs_mru_cache.h  |    6 +--
 3 files changed, 31 insertions(+), 50 deletions(-)

diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce22786..16f8e17 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -467,8 +467,7 @@ void
 xfs_filestream_flush(
        xfs_mount_t     *mp)
 {
-       /* point in time flush, so keep the reaper running */
-       xfs_mru_cache_flush(mp->m_filestream, 1);
+       xfs_mru_cache_flush(mp->m_filestream);
 }
 
 /*
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 7deb9e3..e0b358c 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert(
         */
        if (!_xfs_mru_cache_migrate(mru, now)) {
                mru->time_zero = now;
-               if (!mru->next_reap)
-                       mru->next_reap = mru->grp_count * mru->grp_time;
+               if (!mru->queued) {
+                       mru->queued = 1;
+                       queue_delayed_work(xfs_mru_reap_wq, &mru->work,
+                                          mru->grp_count * mru->grp_time);
+               }
        } else {
                grp = (now - mru->time_zero) / mru->grp_time;
                grp = (mru->lru_grp + grp) % mru->grp_count;
@@ -271,29 +274,26 @@ _xfs_mru_cache_reap(
        struct work_struct      *work)
 {
        xfs_mru_cache_t         *mru = container_of(work, xfs_mru_cache_t, work.work);
-       unsigned long           now;
+       unsigned long           now, next;
 
        ASSERT(mru && mru->lists);
        if (!mru || !mru->lists)
                return;
 
        mutex_spinlock(&mru->lock);
-       now = jiffies;
-       if (mru->reap_all ||
-           (mru->next_reap && time_after(now, mru->next_reap))) {
-               if (mru->reap_all)
-                       now += mru->grp_count * mru->grp_time * 2;
-               mru->next_reap = _xfs_mru_cache_migrate(mru, now);
-               _xfs_mru_cache_clear_reap_list(mru);
+       next = _xfs_mru_cache_migrate(mru, jiffies);
+       _xfs_mru_cache_clear_reap_list(mru);
+
+       mru->queued = next;
+       if ((mru->queued > 0)) {
+               now = jiffies;
+               if (next <= now)
+                       next = 0;
+               else
+                       next -= now;
+               queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
        }
 
-       /*
-        * the process that triggered the reap_all is responsible
-        * for restating the periodic reap if it is required.
-        */
-       if (!mru->reap_all)
-               queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-       mru->reap_all = 0;
        mutex_spinunlock(&mru->lock, 0);
 }
 
@@ -352,7 +352,7 @@ xfs_mru_cache_create(
 
        /* An extra list is needed to avoid reaping up to a grp_time early. */
        mru->grp_count = grp_count + 1;
-       mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+       mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
 
        if (!mru->lists) {
                err = ENOMEM;
@@ -374,11 +374,6 @@ xfs_mru_cache_create(
        mru->grp_time  = grp_time;
        mru->free_func = free_func;
 
-       /* start up the reaper event */
-       mru->next_reap = 0;
-       mru->reap_all = 0;
-       queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-
        *mrup = mru;
 
 exit:
@@ -394,35 +389,25 @@ exit:
  * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
  * free functions as they're deleted.  When this function returns, the caller is
  * guaranteed that all the free functions for all the elements have finished
- * executing.
- *
- * While we are flushing, we stop the periodic reaper event from triggering.
- * Normally, we want to restart this periodic event, but if we are shutting
- * down the cache we do not want it restarted. hence the restart parameter
- * where 0 = do not restart reaper and 1 = restart reaper.
+ * executing and the reaper is not running.
  */
 void
 xfs_mru_cache_flush(
-       xfs_mru_cache_t         *mru,
-       int                     restart)
+       xfs_mru_cache_t         *mru)
 {
        if (!mru || !mru->lists)
                return;
 
-       cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-
        mutex_spinlock(&mru->lock);
-       mru->reap_all = 1;
-       mutex_spinunlock(&mru->lock, 0);
+       if (mru->queued) {
+               mutex_spinunlock(&mru->lock, 0);
+               cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+               mutex_spinlock(&mru->lock);
+       }
 
-       queue_work(xfs_mru_reap_wq, &mru->work.work);
-       flush_workqueue(xfs_mru_reap_wq);
+       _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
+       _xfs_mru_cache_clear_reap_list(mru);
 
-       mutex_spinlock(&mru->lock);
-       WARN_ON_ONCE(mru->reap_all != 0);
-       mru->reap_all = 0;
-       if (restart)
-               queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
        mutex_spinunlock(&mru->lock, 0);
 }
 
@@ -433,8 +418,7 @@ xfs_mru_cache_destroy(
        if (!mru || !mru->lists)
                return;
 
-       /* we don't want the reaper to restart here */
-       xfs_mru_cache_flush(mru, 0);
+       xfs_mru_cache_flush(mru);
 
        kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
        kmem_free(mru, sizeof(*mru));
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 624fd10..dd58ea1 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache
        unsigned int            grp_time;  /* Time period spanned by grps.  */
        unsigned int            lru_grp;   /* Group containing time zero.   */
        unsigned long           time_zero; /* Time first element was added. */
-       unsigned long           next_reap; /* Time that the reaper should
-                                             next do something. */
-       unsigned int            reap_all;  /* if set, reap all lists */
        xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
        struct delayed_work     work;      /* Workqueue data for reaping.   */
+       unsigned int            queued;    /* work has been queued */
 } xfs_mru_cache_t;
 
 int xfs_mru_cache_init(void);
@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void);
 int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
                             unsigned int grp_count,
                             xfs_mru_cache_free_func_t free_func);
-void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart);
+void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
 void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
 int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
                                void *value);