Hi, now in the -nmw git tree. Thanks,
Steve. On Fri, 2009-07-10 at 18:04 -0500, Benjamin Marzinski wrote: > GFS2 was placing far too many glocks on the reclaim list that were not good > candidates for freeing up from cache. These locks would sit there and > repeatedly get scanned to see if they could be reclaimed, wasting a lot > of time when there was memory pressure. This fix does more checks on the > locks to see if they are actually likely to be removable from cache. > > Signed-off-by: Benjamin Marzinski <[email protected]> > --- > fs/gfs2/glock.c | 72 > +++++++++++++++++++++++++++++++++++--------------------- > 1 file changed, 46 insertions(+), 26 deletions(-) > > Index: gfs2-2.6-nmw/fs/gfs2/glock.c > =================================================================== > --- gfs2-2.6-nmw.orig/fs/gfs2/glock.c > +++ gfs2-2.6-nmw/fs/gfs2/glock.c > @@ -174,6 +174,26 @@ static void gfs2_glock_hold(struct gfs2_ > } > > /** > + * demote_ok - Check to see if it's ok to unlock a glock > + * @gl: the glock > + * > + * Returns: 1 if it's ok > + */ > + > +static int demote_ok(const struct gfs2_glock *gl) > +{ > + const struct gfs2_glock_operations *glops = gl->gl_ops; > + > + if (gl->gl_state == LM_ST_UNLOCKED) > + return 0; > + if (!list_empty(&gl->gl_holders)) > + return 0; > + if (glops->go_demote_ok) > + return glops->go_demote_ok(gl); > + return 1; > +} > + > +/** > * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list > * @gl: the glock > * > @@ -181,8 +201,13 @@ static void gfs2_glock_hold(struct gfs2_ > > static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) > { > + int may_reclaim; > + may_reclaim = (demote_ok(gl) && > + (atomic_read(&gl->gl_ref) == 1 || > + (gl->gl_name.ln_type == LM_TYPE_INODE && > + atomic_read(&gl->gl_ref) <= 2))); > spin_lock(&lru_lock); > - if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) { > + if (list_empty(&gl->gl_lru) && may_reclaim) { > list_add_tail(&gl->gl_lru, &lru_list); > atomic_inc(&lru_count); > } > @@ -190,6 +215,21 @@ static void 
gfs2_glock_schedule_for_recl > } > > /** > + * gfs2_glock_put_nolock() - Decrement reference count on glock > + * @gl: The glock to put > + * > + * This function should only be used if the caller has its own reference > + * to the glock, in addition to the one it is dropping. > + */ > + > +static void gfs2_glock_put_nolock(struct gfs2_glock *gl) > +{ > + if (atomic_dec_and_test(&gl->gl_ref)) > + GLOCK_BUG_ON(gl, 1); > + gfs2_glock_schedule_for_reclaim(gl); > +} > + > +/** > * gfs2_glock_put() - Decrement reference count on glock > * @gl: The glock to put > * > @@ -214,9 +254,9 @@ int gfs2_glock_put(struct gfs2_glock *gl > rv = 1; > goto out; > } > - /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */ > - if (atomic_read(&gl->gl_ref) == 2) > - gfs2_glock_schedule_for_reclaim(gl); > + spin_lock(&gl->gl_spin); > + gfs2_glock_schedule_for_reclaim(gl); > + spin_unlock(&gl->gl_spin); > write_unlock(gl_lock_addr(gl->gl_hash)); > out: > return rv; > @@ -398,7 +438,7 @@ static void state_change(struct gfs2_glo > if (held2) > gfs2_glock_hold(gl); > else > - gfs2_glock_put(gl); > + gfs2_glock_put_nolock(gl); > } > > gl->gl_state = new_state; > @@ -633,7 +673,7 @@ out: > out_sched: > gfs2_glock_hold(gl); > if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) > - gfs2_glock_put(gl); > + gfs2_glock_put_nolock(gl); > out_unlock: > clear_bit(GLF_LOCK, &gl->gl_flags); > goto out; > @@ -1274,26 +1314,6 @@ void gfs2_glock_complete(struct gfs2_glo > gfs2_glock_put(gl); > } > > -/** > - * demote_ok - Check to see if it's ok to unlock a glock > - * @gl: the glock > - * > - * Returns: 1 if it's ok > - */ > - > -static int demote_ok(const struct gfs2_glock *gl) > -{ > - const struct gfs2_glock_operations *glops = gl->gl_ops; > - > - if (gl->gl_state == LM_ST_UNLOCKED) > - return 0; > - if (!list_empty(&gl->gl_holders)) > - return 0; > - if (glops->go_demote_ok) > - return glops->go_demote_ok(gl); > - return 1; > -} > - > > static int gfs2_shrink_glock_memory(int nr, 
gfp_t gfp_mask) > { >
