With one trivial change (taking the pool lock slightly earlier, on wakeup
from schedule_timeout()), all uses of the waitq happen under the pool
lock, so we can use the locked (__-prefixed) versions of the wait queue
functions and avoid taking the waitqueue's internal spinlock.
Signed-off-by: Matthew Wilcox <[EMAIL PROTECTED]>
---
mm/dmapool.c | 9 +++++----
1 files changed, 5 insertions(+), 4 deletions(-)
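For reference, a minimal sketch of the locked-waitqueue pattern this patch
moves to. The struct and function names below are invented for
illustration; only the __add_wait_queue()/__remove_wait_queue()/
wake_up_locked() calls mirror the change in the diff. Because every
waitqueue operation runs with the object's own spinlock held, the
waitqueue's internal lock is never needed:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

/* Illustrative only; init with spin_lock_init()/init_waitqueue_head() */
struct obj {
	spinlock_t		lock;
	wait_queue_head_t	waitq;
	int			avail;
};

static void obj_wait_for_resource(struct obj *obj)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irqsave(&obj->lock, flags);
	while (!obj->avail) {
		current->state = TASK_INTERRUPTIBLE;
		/* lock already held, so the __ variant is safe */
		__add_wait_queue(&obj->waitq, &wait);
		spin_unlock_irqrestore(&obj->lock, flags);

		schedule();	/* dmapool uses schedule_timeout() here */

		/*
		 * Retake the lock *before* touching the waitq -- the
		 * reordering this patch makes in dma_pool_alloc().
		 */
		spin_lock_irqsave(&obj->lock, flags);
		__remove_wait_queue(&obj->waitq, &wait);
	}
	obj->avail--;
	spin_unlock_irqrestore(&obj->lock, flags);
}

static void obj_release_resource(struct obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	obj->avail++;
	if (waitqueue_active(&obj->waitq))
		wake_up_locked(&obj->waitq);	/* caller holds obj->lock */
	spin_unlock_irqrestore(&obj->lock, flags);
}

There are no lost wakeups: the sleeper sets TASK_INTERRUPTIBLE and queues
itself before dropping the lock, and the waker only calls wake_up_locked()
while holding that same lock. (Signal handling is omitted for brevity.)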
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 6201371..a359b5e 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -273,8 +273,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 	size_t offset;
 	void *retval;
 
- restart:
 	spin_lock_irqsave(&pool->lock, flags);
+ restart:
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		int i;
 		/* only cachable accesses here ... */
@@ -296,12 +296,13 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		DECLARE_WAITQUEUE(wait, current);
 
 		current->state = TASK_INTERRUPTIBLE;
-		add_wait_queue(&pool->waitq, &wait);
+		__add_wait_queue(&pool->waitq, &wait);
 		spin_unlock_irqrestore(&pool->lock, flags);
 
 		schedule_timeout(POOL_TIMEOUT_JIFFIES);
 
-		remove_wait_queue(&pool->waitq, &wait);
+		spin_lock_irqsave(&pool->lock, flags);
+		__remove_wait_queue(&pool->waitq, &wait);
 		goto restart;
 	}
 	retval = NULL;
@@ -401,7 +402,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	page->in_use--;
 	set_bit(block, &page->bitmap[map]);
 	if (waitqueue_active(&pool->waitq))
-		wake_up(&pool->waitq);
+		wake_up_locked(&pool->waitq);
 	/*
 	 * Resist a temptation to do
 	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
--
1.5.3.1