Hi,

Before this patch, recovery would delay all callbacks by putting them
on a queue, and afterward they were all requeued to the callback
work queue at once. This patch keeps that behavior, but takes a break
after every 25 of them so the requeueing won't swamp the CPU at the
expense of other RT processes like corosync.

Signed-off-by: Bob Peterson <rpete...@redhat.com>
---
 fs/dlm/ast.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 562fa8c3edff..47ee66d70109 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
                flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
        struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
        if (!ls->ls_callback_wq)
                return;
 
+more:
        mutex_lock(&ls->ls_cb_mutex);
        list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
                list_del_init(&lkb->lkb_cb_list);
                queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
                count++;
+               if (count == MAX_CB_QUEUE)
+                       break;
        }
        mutex_unlock(&ls->ls_cb_mutex);
 
        if (count)
                log_rinfo(ls, "dlm_callback_resume %d", count);
+       if (count == MAX_CB_QUEUE) {
+               count = 0;
+               cond_resched();
+               goto more;
+       }
 }
 
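Not part of the patch, just for illustration: below is a minimal
userspace sketch of the same drain-in-batches idea, with sched_yield()
standing in for the kernel's cond_resched() and a made-up list type
and helper, so the pattern can be tried outside the kernel.

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CB_QUEUE 25

struct item {
	int id;
	struct item *next;
};

/* Drain the list in chunks of MAX_CB_QUEUE, yielding between chunks. */
static void drain_in_batches(struct item **head)
{
	int count;

more:
	count = 0;
	while (*head) {
		struct item *it = *head;

		*head = it->next;
		printf("queued item %d\n", it->id); /* stands in for queue_work() */
		free(it);
		if (++count == MAX_CB_QUEUE)
			break;
	}
	if (count == MAX_CB_QUEUE) {
		sched_yield();	/* give other runnable tasks a turn */
		goto more;
	}
}

int main(void)
{
	struct item *head = NULL;
	int i;

	/* Build 60 items: two full batches plus a short tail. */
	for (i = 60; i > 0; i--) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			return 1;
		it->id = i;
		it->next = head;
		head = it;
	}
	drain_in_batches(&head);
	return 0;
}

The kernel version obviously differs (it re-takes ls_cb_mutex around
each batch and logs the count), but the shape of the loop is the same.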
