This patch prepares the writequeue and message handling to be called
from softirq context by switching to the _bh spinlock variants, which
disable software interrupts on the local CPU while the lock is held.
The upcoming midcomms re-transmit handling will introduce a timer whose
expiry function makes use of this.
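
For illustration, a minimal sketch of the locking pattern this change
enables (example_lock, example_queue and example_timer_fn are
hypothetical names used only for illustration, not part of this patch):

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/timer.h>

    static DEFINE_SPINLOCK(example_lock);
    static LIST_HEAD(example_queue);

    /* Timer expiry runs in softirq context; the plain lock variant is
     * sufficient here because softirqs do not nest on the same CPU.
     */
    static void example_timer_fn(struct timer_list *t)
    {
            spin_lock(&example_lock);
            /* re-transmit handling would walk example_queue here */
            spin_unlock(&example_lock);
    }

    /* Process context must use the _bh variant: without it, the timer
     * softirq could fire on this CPU while the lock is held and
     * deadlock on example_lock.
     */
    static void example_enqueue(struct list_head *entry)
    {
            spin_lock_bh(&example_lock);
            list_add_tail(entry, &example_queue);
            spin_unlock_bh(&example_lock);
    }

Taking the lock with plain spin_lock() in process context while a
softirq user exists would typically be flagged by lockdep as an
inconsistent lock state.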

Signed-off-by: Alexander Aring <[email protected]>
---
 fs/dlm/lowcomms.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 438badc2d870..a84223b549ed 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1393,7 +1393,7 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
 {
        struct writequeue_entry *e;
 
-       spin_lock(&con->writequeue_lock);
+       spin_lock_bh(&con->writequeue_lock);
        if (!list_empty(&con->writequeue)) {
                e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
                if (DLM_WQ_REMAIN_BYTES(e) >= len) {
@@ -1405,12 +1405,12 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
 
                        e->end += len;
                        e->users++;
-                       spin_unlock(&con->writequeue_lock);
+                       spin_unlock_bh(&con->writequeue_lock);
 
                        return e;
                }
        }
-       spin_unlock(&con->writequeue_lock);
+       spin_unlock_bh(&con->writequeue_lock);
 
        e = new_writequeue_entry(con, allocation);
        if (!e)
@@ -1420,12 +1420,12 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
        *ppc = page_address(e->page);
        e->end += len;
 
-       spin_lock(&con->writequeue_lock);
+       spin_lock_bh(&con->writequeue_lock);
        if (cb)
                cb(*ppc, priv);
 
        list_add_tail(&e->list, &con->writequeue);
-       spin_unlock(&con->writequeue_lock);
+       spin_unlock_bh(&con->writequeue_lock);
 
        return e;
 };
@@ -1474,7 +1474,7 @@ void dlm_lowcomms_commit_buffer(void *mh)
        struct connection *con = e->con;
        int users;
 
-       spin_lock(&con->writequeue_lock);
+       spin_lock_bh(&con->writequeue_lock);
        list_add(&msg->list, &e->msgs);
        kref_get(&msg->ref);
 
@@ -1483,13 +1483,13 @@ void dlm_lowcomms_commit_buffer(void *mh)
                goto out;
 
        e->len = DLM_WQ_LENGTH_BYTES(e);
-       spin_unlock(&con->writequeue_lock);
+       spin_unlock_bh(&con->writequeue_lock);
 
        queue_work(send_workqueue, &con->swork);
        return;
 
 out:
-       spin_unlock(&con->writequeue_lock);
+       spin_unlock_bh(&con->writequeue_lock);
        return;
 }
 
@@ -1520,7 +1520,7 @@ static void send_to_sock(struct connection *con)
        if (con->sock == NULL)
                goto out_connect;
 
-       spin_lock(&con->writequeue_lock);
+       spin_lock_bh(&con->writequeue_lock);
        for (;;) {
                if (list_empty(&con->writequeue))
                        break;
@@ -1529,7 +1529,7 @@ static void send_to_sock(struct connection *con)
                len = e->len;
                offset = e->offset;
                BUG_ON(len == 0 && e->users == 0);
-               spin_unlock(&con->writequeue_lock);
+               spin_unlock_bh(&con->writequeue_lock);
 
                ret = 0;
                if (len) {
@@ -1557,10 +1557,10 @@ static void send_to_sock(struct connection *con)
                        count = 0;
                }
 
-               spin_lock(&con->writequeue_lock);
+               spin_lock_bh(&con->writequeue_lock);
                writequeue_entry_complete(e, ret);
        }
-       spin_unlock(&con->writequeue_lock);
+       spin_unlock_bh(&con->writequeue_lock);
 out:
        mutex_unlock(&con->sock_mutex);
        return;
@@ -1583,11 +1583,11 @@ static void clean_one_writequeue(struct connection *con)
 {
        struct writequeue_entry *e, *safe;
 
-       spin_lock(&con->writequeue_lock);
+       spin_lock_bh(&con->writequeue_lock);
        list_for_each_entry_safe(e, safe, &con->writequeue, list) {
                free_entry(e);
        }
-       spin_unlock(&con->writequeue_lock);
+       spin_unlock_bh(&con->writequeue_lock);
 }
 
 /* Called from recovery when it knows that a node has
-- 
2.26.2
