The only generic interface for executing work asynchronously in BH
(bottom-half) context is the tasklet; however, tasklets are marked
deprecated and have known design flaws. To replace them, BH workqueue
support was recently added. A BH workqueue behaves like a regular
workqueue except that its queued work items execute in BH context.
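
For illustration, a minimal sketch of the new API (my_dev, my_recv_work
and the call sites are hypothetical names; system_bh_wq and from_work()
are from the branch referenced below):

  #include <linux/workqueue.h>

  struct my_dev {
          struct work_struct recv_work;
  };

  /* Runs in BH context because it is queued on system_bh_wq;
   * it must not sleep. */
  static void my_recv_work(struct work_struct *w)
  {
          struct my_dev *dev = from_work(dev, w, recv_work);
          /* ... handle received messages ... */
  }

  /* setup */    INIT_WORK(&dev->recv_work, my_recv_work);
  /* schedule */ queue_work(system_bh_wq, &dev->recv_work);
  /* teardown */ cancel_work_sync(&dev->recv_work);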

This patch converts the IPMI message handler
(drivers/char/ipmi/ipmi_msghandler.c) from tasklet to BH workqueue.

Based on the work done by Tejun Heo <t...@kernel.org>
Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10

Signed-off-by: Allen Pais <allen.l...@gmail.com>
---
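For reviewers, the conversion is a mechanical 1:1 mapping:

  tasklet_setup()    -> INIT_WORK()
  from_tasklet()     -> from_work()
  tasklet_schedule() -> queue_work(system_bh_wq, ...)
  tasklet_kill()     -> cancel_work_sync()
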
 drivers/char/ipmi/ipmi_msghandler.c | 30 ++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index b0eedc4595b3..fce2a2dbdc82 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -36,12 +36,13 @@
 #include <linux/nospec.h>
 #include <linux/vmalloc.h>
 #include <linux/delay.h>
+#include <linux/workqueue.h>
 
 #define IPMI_DRIVER_VERSION "39.2"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(struct tasklet_struct *t);
+static void smi_recv_work(struct work_struct *t);
 static void handle_new_recv_msgs(struct ipmi_smi *intf);
 static void need_waiter(struct ipmi_smi *intf);
 static int handle_one_recv_msg(struct ipmi_smi *intf,
@@ -498,13 +499,13 @@ struct ipmi_smi {
        /*
         * Messages queued for delivery.  If delivery fails (out of memory
         * for instance), They will stay in here to be processed later in a
-        * periodic timer interrupt.  The tasklet is for handling received
+        * periodic timer interrupt.  The work item is for handling received
         * messages directly from the handler.
         */
        spinlock_t       waiting_rcv_msgs_lock;
        struct list_head waiting_rcv_msgs;
        atomic_t         watchdog_pretimeouts_to_deliver;
-       struct tasklet_struct recv_tasklet;
+       struct work_struct recv_work;
 
        spinlock_t             xmit_msgs_lock;
        struct list_head       xmit_msgs;
@@ -704,7 +705,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
        struct cmd_rcvr  *rcvr, *rcvr2;
        struct list_head list;
 
-       tasklet_kill(&intf->recv_tasklet);
+       cancel_work_sync(&intf->recv_work);
 
        free_smi_msg_list(&intf->waiting_rcv_msgs);
        free_recv_msg_list(&intf->waiting_events);
@@ -1319,7 +1320,7 @@ static void free_user(struct kref *ref)
 {
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
 
-       /* SRCU cleanup must happen in task context. */
+       /* SRCU cleanup must happen in work context. */
        queue_work(remove_work_wq, &user->remove_work);
 }
 
@@ -3605,8 +3606,7 @@ int ipmi_add_smi(struct module         *owner,
        intf->curr_seq = 0;
        spin_lock_init(&intf->waiting_rcv_msgs_lock);
        INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
-       tasklet_setup(&intf->recv_tasklet,
-                    smi_recv_tasklet);
+       INIT_WORK(&intf->recv_work, smi_recv_work);
        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
        spin_lock_init(&intf->xmit_msgs_lock);
        INIT_LIST_HEAD(&intf->xmit_msgs);
@@ -4779,7 +4779,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
                         * To preserve message order, quit if we
                         * can't handle a message.  Add the message
                         * back at the head, this is safe because this
-                        * tasklet is the only thing that pulls the
+                        * work item is the only thing that pulls the
                         * messages.
                         */
                        list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
@@ -4812,10 +4812,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
        }
 }
 
-static void smi_recv_tasklet(struct tasklet_struct *t)
+static void smi_recv_work(struct work_struct *t)
 {
        unsigned long flags = 0; /* keep us warning-free. */
-       struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+       struct ipmi_smi *intf = from_work(intf, t, recv_work);
        int run_to_completion = intf->run_to_completion;
        struct ipmi_smi_msg *newmsg = NULL;
 
@@ -4866,7 +4866,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 
        /*
         * To preserve message order, we keep a queue and deliver from
-        * a tasklet.
+        * a work item.
         */
        if (!run_to_completion)
                spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
@@ -4887,9 +4887,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
        if (run_to_completion)
-               smi_recv_tasklet(&intf->recv_tasklet);
+               smi_recv_work(&intf->recv_work);
        else
-               tasklet_schedule(&intf->recv_tasklet);
+               queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_msg_received);
 
@@ -4899,7 +4899,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
                return;
 
        atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
-       tasklet_schedule(&intf->recv_tasklet);
+       queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
 
@@ -5068,7 +5068,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
                                       flags);
        }
 
-       tasklet_schedule(&intf->recv_tasklet);
+       queue_work(system_bh_wq, &intf->recv_work);
 
        return need_timer;
 }
-- 
2.17.1
