On Thu, Oct 04, 2007 at 02:04:25PM -0400, Wendy Cheng wrote:
> Red Hat bugzilla 295641... Wendy
> 
> 
> 

> Fix a race condition where multiple glock demote requests are sent to
> a node back-to-back. This patch adds a check in handle_callback() to
> see whether a demote request is already in progress. If so, it sets a
> flag so that run_queue() will loop again and handle the new request,
> instead of erroneously setting gl_demote_state to a different state.
> 
> Signed-off-by: S. Wendy Cheng <[EMAIL PROTECTED]>
> 
>  glock.c  |   13 ++++++++++++-
>  incore.h |    2 ++
>  2 files changed, 14 insertions(+), 1 deletion(-)
> 
> --- e48-brew/fs/gfs2/incore.h 2007-09-20 17:29:00.000000000 -0500
> +++ e48/fs/gfs2/incore.h      2007-10-02 16:45:38.000000000 -0500
> @@ -171,6 +171,7 @@ enum {
>       GLF_DEMOTE              = 3,
>       GLF_PENDING_DEMOTE      = 4,
>       GLF_DIRTY               = 5,
> +     GLF_DEMOTE_IN_PROGRESS  = 6,
>  };
>  
>  struct gfs2_glock {
> @@ -190,6 +191,7 @@ struct gfs2_glock {
>       struct list_head gl_holders;
>       struct list_head gl_waiters1;   /* HIF_MUTEX */
>       struct list_head gl_waiters3;   /* HIF_PROMOTE */
> +     int gl_waiters2;                /* GIF_DEMOTE */
>  
>       const struct gfs2_glock_operations *gl_ops;
>  
> --- e48-brew/fs/gfs2/glock.c  2007-09-18 11:26:53.000000000 -0500
> +++ e48/fs/gfs2/glock.c       2007-10-02 18:19:12.000000000 -0500
> @@ -567,6 +567,7 @@ static int rq_demote(struct gfs2_glock *
>               return 0;
>       }
>       set_bit(GLF_LOCK, &gl->gl_flags);
> +     set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
>       if (gl->gl_demote_state == LM_ST_UNLOCKED ||
>           gl->gl_state != LM_ST_EXCLUSIVE) {
>               spin_unlock(&gl->gl_spin);
> @@ -575,6 +576,8 @@ static int rq_demote(struct gfs2_glock *
>               spin_unlock(&gl->gl_spin);
>               gfs2_glock_xmote_th(gl, NULL);
>       }
> +
> +     clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
>       spin_lock(&gl->gl_spin);
>

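Since the handle_callback() hunk isn't quoted above, here is a rough sketch of
the check the changelog describes. It is an assumption based on the description
and the incore.h hunk, not the posted patch itself; the exact shape of
handle_callback() and the use of gl_waiters2 as a plain flag are guesses:

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
		/* first demote request: just record the requested state */
		gl->gl_demote_state = state;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
			/* a demote is being serviced right now: remember the
			 * new request so run_queue() loops again instead of
			 * overwriting gl_demote_state mid-demote */
			gl->gl_waiters2 = 1;
		else
			gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	spin_unlock(&gl->gl_spin);
}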
You may want to move the clear_bit() inside the spin_lock, so you don't end up
with the same race where the bit is cleared while we're still inside
handle_callback().

Josef 
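Concretely, the reordering being suggested would look something like this at
the end of rq_demote() (a sketch of the idea only, not a tested change):

	spin_lock(&gl->gl_spin);
	/* Clearing the bit only after re-taking gl_spin means a concurrent
	 * handle_callback() cannot observe GLF_DEMOTE_IN_PROGRESS as clear
	 * while this demote is still being wound down. */
	clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);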
