This patch changes ls_cb_mutex to a rwlock. The hot path in
dlm_add_cb() can be called for different lkbs at the same time, but
currently parallel calls to dlm_add_cb() can block on the cb mutex. To
avoid that we switch to a rwlock and take only a read lock in
dlm_add_cb(). The lock is only needed so that dlm_callback_suspend()
and dlm_callback_resume() cannot run concurrently with the queueing
part of dlm_add_cb(); those two functions take the write lock to stop
any callback queueing in dlm_add_cb().
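
As a minimal user-space sketch of the resulting locking pattern (not the
DLM code itself), the following uses pthread rwlocks as a stand-in for
the kernel rwlock_t; the names cb_lock, cb_delay_flag, add_cb,
callback_suspend and callback_resume are invented for illustration:

	/* Hypothetical analogue of the pattern in this patch: readers
	 * (the queueing hot path) run in parallel, writers (suspend and
	 * resume) exclude them while updating the delay state. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;
	static bool cb_delay_flag;	/* stands in for LSFL_CB_DELAY */

	/* Hot path: many callers may run in parallel, take the read lock. */
	static void add_cb(int lkb_id)
	{
		pthread_rwlock_rdlock(&cb_lock);
		if (cb_delay_flag)
			printf("lkb %d: put on delay list\n", lkb_id);
		else
			printf("lkb %d: queued to callback workqueue\n", lkb_id);
		pthread_rwlock_unlock(&cb_lock);
	}

	/* Suspend: write lock so no add_cb() races the flag update. */
	static void callback_suspend(void)
	{
		pthread_rwlock_wrlock(&cb_lock);
		cb_delay_flag = true;
		pthread_rwlock_unlock(&cb_lock);
		/* ...flush pending callback work here... */
	}

	/* Resume: requeue delayed work under the write lock, clear the flag. */
	static void callback_resume(void)
	{
		pthread_rwlock_wrlock(&cb_lock);
		/* ...move entries from the delay list back to the workqueue... */
		cb_delay_flag = false;
		pthread_rwlock_unlock(&cb_lock);
	}

	int main(void)
	{
		add_cb(1);
		callback_suspend();
		add_cb(2);	/* lands on the delay list */
		callback_resume();
		add_cb(3);
		return 0;
	}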

Signed-off-by: Alexander Aring <aahri...@redhat.com>
---
 fs/dlm/ast.c          | 12 ++++++------
 fs/dlm/dlm_internal.h |  2 +-
 fs/dlm/lockspace.c    |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 6e07c151ad28..43588c8ab5fc 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -200,13 +200,13 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
        if (!prev_seq) {
                kref_get(&lkb->lkb_ref);
 
-               mutex_lock(&ls->ls_cb_mutex);
+               read_lock(&ls->ls_cb_lock);
                if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
                        list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
                } else {
                        queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
                }
-               mutex_unlock(&ls->ls_cb_mutex);
+               read_unlock(&ls->ls_cb_lock);
        }
  out:
        mutex_unlock(&lkb->lkb_cb_mutex);
@@ -289,9 +289,9 @@ void dlm_callback_stop(struct dlm_ls *ls)
 void dlm_callback_suspend(struct dlm_ls *ls)
 {
        if (ls->ls_callback_wq) {
-               mutex_lock(&ls->ls_cb_mutex);
+               write_lock(&ls->ls_cb_lock);
                set_bit(LSFL_CB_DELAY, &ls->ls_flags);
-               mutex_unlock(&ls->ls_cb_mutex);
+               write_unlock(&ls->ls_cb_lock);
 
                flush_workqueue(ls->ls_callback_wq);
        }
@@ -309,7 +309,7 @@ void dlm_callback_resume(struct dlm_ls *ls)
                return;
 
 more:
-       mutex_lock(&ls->ls_cb_mutex);
+       write_lock(&ls->ls_cb_lock);
        list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
                list_del_init(&lkb->lkb_cb_list);
                queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
@@ -320,7 +320,7 @@ void dlm_callback_resume(struct dlm_ls *ls)
        empty = list_empty(&ls->ls_cb_delay);
        if (empty)
                clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
-       mutex_unlock(&ls->ls_cb_mutex);
+       write_unlock(&ls->ls_cb_lock);
 
        sum += count;
        if (!empty) {
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index e34c3d2639a5..67f928544d26 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -631,7 +631,7 @@ struct dlm_ls {
 
        /* recovery related */
 
-       struct mutex            ls_cb_mutex;
+       rwlock_t                ls_cb_lock;
        struct list_head        ls_cb_delay; /* save for queue_work later */
        struct timer_list       ls_timer;
        struct task_struct      *ls_recoverd_task;
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index bae050df7abf..b15956e82842 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -567,7 +567,7 @@ static int new_lockspace(const char *name, const char *cluster,
        init_completion(&ls->ls_recovery_done);
        ls->ls_recovery_result = -1;
 
-       mutex_init(&ls->ls_cb_mutex);
+       rwlock_init(&ls->ls_cb_lock);
        INIT_LIST_HEAD(&ls->ls_cb_delay);
 
        ls->ls_recoverd_task = NULL;
-- 
2.31.1
