This patch fixes a race by surrounding the set_bit(), as well as the
test_bit() and its conditional code blocks for LSFL_CB_DELAY, with
ls_cb_mutex.

The function dlm_callback_suspend() has the idea to stop all callbacks and
flush all currently queued ones. The set_bit() alone is not enough because
a queue_work() call can still happen after the workqueue was flushed.
To avoid a queue_work() after the set_bit(), we surround both with the
ls_cb_mutex lock.

Cc: sta...@vger.kernel.org
Signed-off-by: Alexander Aring <aahri...@redhat.com>
---
 fs/dlm/ast.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 19ef136f9e4f..a44cc42b6317 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -200,13 +200,13 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
        if (!prev_seq) {
                kref_get(&lkb->lkb_ref);
 
+               mutex_lock(&ls->ls_cb_mutex);
                if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
-                       mutex_lock(&ls->ls_cb_mutex);
                        list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
-                       mutex_unlock(&ls->ls_cb_mutex);
                } else {
                        queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
                }
+               mutex_unlock(&ls->ls_cb_mutex);
        }
  out:
        mutex_unlock(&lkb->lkb_cb_mutex);
@@ -288,7 +288,9 @@ void dlm_callback_stop(struct dlm_ls *ls)
 
 void dlm_callback_suspend(struct dlm_ls *ls)
 {
+       mutex_lock(&ls->ls_cb_mutex);
        set_bit(LSFL_CB_DELAY, &ls->ls_flags);
+       mutex_unlock(&ls->ls_cb_mutex);
 
        if (ls->ls_callback_wq)
                flush_workqueue(ls->ls_callback_wq);
-- 
2.31.1

Reply via email to