dma_pool_alloc() scales poorly once a pool contains many pages because it
does a linear scan of all previously-allocated pages looking for a free
block before falling back to allocating a new page.  Improve its
scalability by maintaining a separate list of pages that have free blocks
ready to (re)allocate, so the allocator can simply take the first entry.
In big O notation, this improves block allocation from O(n) to O(1),
where n is the number of pages in the pool.
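
In sketch form (condensed from the diff below), the invariant is that a
page sits on avail_page_list exactly when it has at least one free block:

	/* alloc: take the first page with a free block, in O(1) */
	page = list_first_entry_or_null(&pool->avail_page_list,
					struct dma_page, avail_page_link);

	/* alloc: the page just became full, so drop it from the list */
	if (page->offset >= pool->allocation)
		list_del_init(&page->avail_page_link);

	/* free: the page regained a free block, so relink it (only once) */
	if (list_empty(&page->avail_page_link))
		list_add(&page->avail_page_link, &pool->avail_page_list);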

Signed-off-by: Tony Battersby <to...@cybernetics.com>
---

Changes since v5:
pool_free_page() no longer exists.
Updated big O usage in description.
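
For context, here is a typical caller pattern that hits the old linear
scan.  This is a minimal illustrative sketch: the pool name, block
size/alignment, and error handling are made up for the example and are
not part of this patch.

	#include <linux/device.h>
	#include <linux/dmapool.h>

	static int example_use(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t handle;
		void *buf;

		/* pool of 64-byte blocks, 64-byte aligned, no boundary */
		pool = dma_pool_create("example", dev, 64, 64, 0);
		if (!pool)
			return -ENOMEM;

		/*
		 * With many blocks already in flight, each allocation
		 * formerly walked every page in the pool; it now pops the
		 * first entry of avail_page_list instead.
		 */
		buf = dma_pool_alloc(pool, GFP_KERNEL, &handle);
		if (!buf) {
			dma_pool_destroy(pool);
			return -ENOMEM;
		}

		/* ... program the device with 'handle' ... */

		dma_pool_free(pool, buf, handle);
		dma_pool_destroy(pool);
		return 0;
	}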

 mm/dmapool.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/mm/dmapool.c b/mm/dmapool.c
index 4e075feb038f..fc9ae0683c20 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -17,6 +17,10 @@
  * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
  * list of free blocks within the page.  Used blocks aren't tracked, but we
  * keep a count of how many are currently allocated from each page.
+ *
+ * The avail_page_list keeps track of pages that have one or more free blocks
+ * available to (re)allocate.  Pages are moved in and out of avail_page_list
+ * as their blocks are allocated and freed.
  */
 
 #include <linux/device.h>
@@ -42,6 +46,7 @@
 
 struct dma_pool {              /* the pool */
        struct list_head page_list;
+       struct list_head avail_page_list;
        spinlock_t lock;
        struct device *dev;
        unsigned int size;
@@ -54,6 +59,7 @@ struct dma_pool {             /* the pool */
 
 struct dma_page {              /* cacheable header for 'allocation' bytes */
        struct list_head page_list;
+       struct list_head avail_page_link;
        void *vaddr;
        dma_addr_t dma;
        unsigned int in_use;
@@ -155,6 +161,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
        retval->dev = dev;
 
        INIT_LIST_HEAD(&retval->page_list);
+       INIT_LIST_HEAD(&retval->avail_page_list);
        spin_lock_init(&retval->lock);
        retval->size = size;
        retval->boundary = boundary;
@@ -311,10 +318,11 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
        might_alloc(mem_flags);
 
        spin_lock_irqsave(&pool->lock, flags);
-       list_for_each_entry(page, &pool->page_list, page_list) {
-               if (page->offset < pool->allocation)
-                       goto ready;
-       }
+       page = list_first_entry_or_null(&pool->avail_page_list,
+                                       struct dma_page,
+                                       avail_page_link);
+       if (page)
+               goto ready;
 
        /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
        spin_unlock_irqrestore(&pool->lock, flags);
@@ -326,10 +334,13 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
        spin_lock_irqsave(&pool->lock, flags);
 
        list_add(&page->page_list, &pool->page_list);
+       list_add(&page->avail_page_link, &pool->avail_page_list);
  ready:
        page->in_use++;
        offset = page->offset;
        page->offset = *(int *)(page->vaddr + offset);
+       if (page->offset >= pool->allocation)
+               list_del_init(&page->avail_page_link);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
 #ifdef DMAPOOL_DEBUG
@@ -451,6 +462,13 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
                memset(vaddr, 0, pool->size);
 #endif
 
+       /*
+        * list_empty() on the page tests if the page is already linked into
+        * avail_page_list to avoid adding it more than once.
+        */
+       if (list_empty(&page->avail_page_link))
+               list_add(&page->avail_page_link, &pool->avail_page_list);
+
        page->in_use--;
        *(int *)vaddr = page->offset;
        page->offset = offset;
-- 
2.25.1
