Re: [PATCH] block/umem: convert tasklet to threaded irq

2021-03-23 Thread Davidlohr Bueso

On Tue, 23 Mar 2021, Jens Axboe wrote:


Me too, I'd be surprised if anyone has used it in... forever. We can
probably drop it - I really dislike making core changes to something
that can't even be tested. Davidlohr, assuming you had no way of
testing this change?


No, no way of testing these changes - I got here via git grep.


Re: [PATCH] block/umem: convert tasklet to threaded irq

2021-03-23 Thread Jens Axboe
On 3/23/21 11:24 AM, Christoph Hellwig wrote:
> On Mon, Mar 22, 2021 at 05:48:56PM -0700, Davidlohr Bueso wrote:
>> Tasklets have long been deprecated as being too heavy on the system
>> by running in irq context - and this is not a performance critical
>> path. If a higher priority process wants to run, it must wait for
>> the tasklet to finish before doing so. A more suitable equivalent
is to convert to a threaded irq instead and deal with the async
>> processing in task context.
> 
> I'm really curious if this driver is still in use at all, or if we
> can drop it.

Me too, I'd be surprised if anyone has used it in... forever. We can
probably drop it - I really dislike making core changes to something
that can't even be tested. Davidlohr, assuming you had no way of
testing this change?

-- 
Jens Axboe



Re: [PATCH] block/umem: convert tasklet to threaded irq

2021-03-23 Thread Christoph Hellwig
On Mon, Mar 22, 2021 at 05:48:56PM -0700, Davidlohr Bueso wrote:
> Tasklets have long been deprecated as being too heavy on the system
> by running in irq context - and this is not a performance critical
> path. If a higher priority process wants to run, it must wait for
> the tasklet to finish before doing so. A more suitable equivalent
> is to convert to a threaded irq instead and deal with the async
> processing in task context.

I'm really curious if this driver is still in use at all, or if we
can drop it.


[PATCH] block/umem: convert tasklet to threaded irq

2021-03-22 Thread Davidlohr Bueso
Tasklets have long been deprecated as being too heavy on the system
by running in irq context - and this is not a performance critical
path. If a higher priority process wants to run, it must wait for
the tasklet to finish before doing so. A more suitable equivalent
is to convert to a threaded irq instead and deal with the async
processing in task context.

Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
 drivers/block/umem.c | 23 ++-
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 982732dbe82e..6b0a110f9233 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -120,7 +120,6 @@ struct cardinfo {
 
int  Active, Ready;
 
-   struct tasklet_struct   tasklet;
unsigned int dma_status;
 
struct {
@@ -243,7 +242,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
  * overloaded to record whether it was a read or a write.
  *
  * The interrupt handler only polls the device to clear the interrupt.
- * The processing of the result is done in a tasklet.
+ * The processing of the result is done in threaded irq.
  */
 
 static void mm_start_io(struct cardinfo *card)
@@ -405,7 +404,7 @@ static int add_bio(struct cardinfo *card)
return 1;
 }
 
-static void process_page(unsigned long data)
+static irqreturn_t process_page(int irq, void *__card)
 {
/* check if any of the requests in the page are DMA_COMPLETE,
 * and deal with them appropriately.
@@ -415,10 +414,10 @@ static void process_page(unsigned long data)
 */
struct mm_page *page;
struct bio *return_bio = NULL;
-   struct cardinfo *card = (struct cardinfo *)data;
+   struct cardinfo *card = (struct cardinfo *)__card;
unsigned int dma_status = card->dma_status;
 
-   spin_lock(&card->lock);
+   spin_lock_bh(&card->lock);
if (card->Active < 0)
goto out_unlock;
	page = &card->mm_pages[card->Active];
@@ -493,7 +492,7 @@ static void process_page(unsigned long data)
mm_start_io(card);
}
  out_unlock:
-   spin_unlock(&card->lock);
+   spin_unlock_bh(&card->lock);
 
while (return_bio) {
struct bio *bio = return_bio;
@@ -502,6 +501,8 @@ static void process_page(unsigned long data)
bio->bi_next = NULL;
bio_endio(bio);
}
+
+   return IRQ_HANDLED;
 }
 
 static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
@@ -637,11 +638,10 @@ HW_TRACE(0x30);
 
/* and process the DMA descriptors */
card->dma_status = dma_status;
-   tasklet_schedule(>tasklet);
 
 HW_TRACE(0x36);
 
-   return IRQ_HANDLED;
+   return IRQ_WAKE_THREAD;
 }
 
 /*
@@ -891,8 +891,6 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!card->queue)
goto failed_alloc;
 
-   tasklet_init(>tasklet, process_page, (unsigned long)card);
-
card->check_batteries = 0;
 
mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
@@ -951,8 +949,8 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
data = ~data;
data += 1;
 
-   if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME,
-   card)) {
+   if (request_threaded_irq(dev->irq, mm_interrupt, process_page,
+IRQF_SHARED, DRIVER_NAME, card)) {
		dev_printk(KERN_ERR, &card->dev->dev,
"Unable to allocate IRQ\n");
ret = -ENODEV;
@@ -1015,7 +1013,6 @@ static void mm_pci_remove(struct pci_dev *dev)
 {
struct cardinfo *card = pci_get_drvdata(dev);
 
-   tasklet_kill(>tasklet);
free_irq(dev->irq, card);
iounmap(card->csr_remap);
 
-- 
2.26.2