If a large number of pages has to be allocated in a short time frame,
allocating many pages in a single operation performs better than many small
operations. But if allocation operations are rare, allocating a large number
of pages per fill operation would waste memory and performance.

The allocation size of a pool is doubled every time a refill happens more
than once between shrink calls. The size is halved if there are no refills
for manager->shrink_alloc_interval seconds.
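
As an illustration (not part of the patch), here is a minimal sketch of the
sizing policy. The sketch_pool type and the two hook functions are
hypothetical and locking is omitted, but the constants and the policy mirror
the patch:

/* Assumes the shrinker runs once per second, as in this series. */
#define SMALL_ALLOCATION        16
#define NUM_PAGES_TO_ALLOC      256
#define SHRINK_ALLOC_INTERVAL   8

struct sketch_pool {
	unsigned alloc_size;	/* pages allocated per refill */
	unsigned last_refill;	/* shrink calls since the last refill */
};

/* Called from the fill path whenever the pool has to be refilled. */
static void sketch_on_refill(struct sketch_pool *pool)
{
	/* A refill with no shrink call since the previous refill:
	 * double the allocation size, up to NUM_PAGES_TO_ALLOC * 2. */
	if (pool->last_refill == 0 && pool->alloc_size < NUM_PAGES_TO_ALLOC * 2)
		pool->alloc_size *= 2;
	pool->last_refill = 0;
}

/* Called once per second from the shrinker. */
static void sketch_on_shrink(struct sketch_pool *pool)
{
	/* SHRINK_ALLOC_INTERVAL shrink calls without a refill:
	 * halve the allocation size, down to SMALL_ALLOCATION. */
	if (pool->alloc_size > SMALL_ALLOCATION &&
	    ++pool->last_refill % SHRINK_ALLOC_INTERVAL == 0)
		pool->alloc_size /= 2;
}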

Signed-off-by: Pauli Nieminen <suok...@gmail.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c |  105 ++++++++++++++++++++++++++--------
 1 files changed, 81 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 206bee9..af91994 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,8 +47,9 @@
 
 
 #define NUM_PAGES_TO_ALLOC             256
-#define SMALL_ALLOCATION               64
+#define SMALL_ALLOCATION               16
 #define FREE_ALL_PAGES                 1
+#define SHRINK_ALLOC_INTERVAL          8
 /* times are in msecs */
 #define PAGE_FREE_INTERVAL             1000
 
@@ -63,7 +64,10 @@
  * @gfp_flags: Flags to pass for alloc_page.
  * @npages: Number of pages in pool
 * @nlowpages: Minimum number of pages in pool since previous shrink
+ * @freed_pages: Number of pages freed in this shrinker run.
  * @alloc_size: Allocation sizes of this pool.
+ * @last_refill: Number of shrink calls (with more than alloc_size pages free)
+ * since last refill call.
  * operation.
  */
 struct ttm_page_pool {
@@ -73,7 +77,9 @@ struct ttm_page_pool {
        int                     gfp_flags;
        unsigned                npages;
        unsigned                nlowpages;
+       unsigned                freed_pages;
        unsigned                alloc_size;
+       unsigned                last_refill;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
@@ -101,6 +107,7 @@ struct ttm_pool_manager {
        atomic_t                page_alloc_inited;
        struct delayed_work     work;
        unsigned                small_allocation;
+       unsigned                shrink_alloc_interval;
 
        union {
                struct ttm_page_pool    pools[NUM_POOLS];
@@ -199,17 +206,33 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
  **/
 static bool ttm_reset_pools(struct ttm_pool_manager *manager)
 {
+       struct ttm_page_pool *pool;
        unsigned long irq_flags;
-       bool pages_in_pool = false;
+       bool more_delayed_work = false;
        unsigned i;
        for (i = 0; i < NUM_POOLS; ++i) {
-               spin_lock_irqsave(&manager->pools[i].lock, irq_flags);
-               manager->pools[i].nlowpages = manager->pools[i].npages;
-               pages_in_pool = pages_in_pool
-                       || manager->pools[i].npages > manager->pools[i].alloc_size;
-               spin_unlock_irqrestore(&manager->pools[i].lock, irq_flags);
+               pool = &manager->pools[i];
+               spin_lock_irqsave(&pool->lock, irq_flags);
+               /**
+                * Check if the pool will have more shrink work in a second.
+                **/
+               if (pool->alloc_size > manager->small_allocation) {
+                       /* Reduce the pool size if there are no refills for
+                        * manager->shrink_alloc_interval seconds. */
+                       if (++pool->last_refill % manager->shrink_alloc_interval == 0)
+                               pool->alloc_size /= 2;
+
+                       more_delayed_work = true;
+               } else
+                       more_delayed_work = more_delayed_work
+                               || pool->npages > pool->alloc_size;
+
+               pool->nlowpages = pool->npages;
+               pool->freed_pages = 0;
+
+               spin_unlock_irqrestore(&pool->lock, irq_flags);
        }
-       return pages_in_pool;
+       return more_delayed_work;
 }
 
 /**
@@ -220,16 +243,19 @@ static bool ttm_reset_pools(struct ttm_pool_manager *manager)
static unsigned ttm_page_pool_get_npages_to_free_locked(struct ttm_page_pool *pool)
 {
        unsigned r;
+       unsigned low = pool->nlowpages - pool->freed_pages;
+       if (pool->nlowpages < pool->freed_pages)
+               low = 0;
        /* If the lowest number of pages was less than alloc_size we don't
         * free any */
-       if (pool->nlowpages < pool->alloc_size)
+       if (low < pool->alloc_size)
                return 0;
        /* leave half of unused pages to pool */
-       r = (pool->nlowpages - pool->alloc_size)/2;
+       r = (low - pool->alloc_size)/2;
        if (r)
                return r;
        /* make sure we remove all pages even when there is rounding down */
-       if (pool->nlowpages)
+       if (low)
                return 1;
        return 0;
 }
@@ -251,12 +277,12 @@ static bool ttm_page_pool_free_pages_locked(struct ttm_page_pool *pool,
         */
        tmp = 2*freed_pages;
        /* protect against rounding errors */
-       if (tmp < pool->nlowpages) {
-               pool->nlowpages -= tmp;
+       if (tmp < pool->nlowpages - pool->freed_pages) {
+               pool->freed_pages += tmp;
                return true;
        }
 
-       pool->nlowpages = 0;
+       pool->freed_pages = pool->nlowpages;
        return false;
 }
 
@@ -329,7 +355,7 @@ restart:
        /* set nlowpages to zero to prevent extra freeing in this patch.
         * nlowpages is reset later after all work has been finished.
         **/
-       pool->nlowpages = 0;
+       pool->freed_pages = pool->nlowpages;
 
        /* remove range of pages from the pool */
        if (freed_pages)
@@ -516,6 +542,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
        struct page *p, *tmp;
        int r;
        unsigned cpages = 0;
+       unsigned alloc_size;
+       bool queue_shrink = false;
        /**
         * Only allow one pool fill operation at a time.
         * If pool doesn't have enough pages for the allocation new pages are
@@ -528,6 +556,24 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
        if (count < _manager.small_allocation
                && count > pool->npages) {
+               /* Increase the allocation size if we are refilling the pool
+                * multiple times between shrink calls. Clamp the alloc size
+                * of the pool to NUM_PAGES_TO_ALLOC * 2. */
+               if (pool->last_refill == 0
+                       && pool->alloc_size < NUM_PAGES_TO_ALLOC * 2) {
+                       pool->alloc_size *= 2;
+                       queue_shrink = true;
+               }
+
+               pool->last_refill = 0;
+               alloc_size = pool->alloc_size;
+
+               /* clamp size to NUM_PAGES_TO_ALLOC because it is the maximum
+                * number of pages in a single caching change.
+                */
+               if (alloc_size > NUM_PAGES_TO_ALLOC)
+                       alloc_size = NUM_PAGES_TO_ALLOC;
+
                /* If allocation request is small and there is not enough
                 * pages in pool we fill the pool first */
                INIT_LIST_HEAD(&new_pages);
@@ -538,16 +584,20 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                 */
                spin_unlock_irqrestore(&pool->lock, irq_flags);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
-                               cstate, pool->alloc_size);
+                               cstate, alloc_size);
+               if (queue_shrink)
+                       (void)queue_delayed_work(_manager.glob->swap_queue,
+                                       &_manager.work,
+                                       round_jiffies(_manager.free_interval));
                spin_lock_irqsave(&pool->lock, irq_flags);
 
                if (!r) {
                        list_splice(&new_pages, &pool->list);
                        ++pool->nrefills;
-                       pool->npages += pool->alloc_size;
+                       pool->npages += alloc_size;
                        /* Have to remember to update the low number of pages
                         * too */
-                       pool->nlowpages += pool->alloc_size;
+                       pool->nlowpages += alloc_size;
                } else {
                        printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
                        /* If we have any pages left put them to the pool. */
@@ -738,7 +788,9 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
        pool->npages = pool->nlowpages = pool->nfrees = 0;
-       pool->alloc_size = NUM_PAGES_TO_ALLOC;
+       pool->last_refill = 0;
+       pool->freed_pages = 0;
+       pool->alloc_size = SMALL_ALLOCATION;
        pool->gfp_flags = flags;
        pool->name = name;
 }
@@ -762,10 +814,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob)
 
        _manager.free_interval = msecs_to_jiffies(PAGE_FREE_INTERVAL);
        _manager.small_allocation = SMALL_ALLOCATION;
+       _manager.shrink_alloc_interval = SHRINK_ALLOC_INTERVAL;
        _manager.glob = glob;
 
        INIT_DELAYED_WORK(&_manager.work, ttm_pool_shrink);
 
+       (void)queue_delayed_work(_manager.glob->swap_queue,
+                       &_manager.work,
+                       round_jiffies(_manager.free_interval));
        return 0;
 }
 
@@ -789,20 +845,21 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
        struct ttm_page_pool *p;
        unsigned i;
-       char *h[] = {"pool", "refills", "pages freed", "size", "min size"};
+       char *h[] = {"pool", "refills", "pages freed", "size", "min size",
+               "alloc size"};
        if (atomic_read(&_manager.page_alloc_inited) == 0) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
-       seq_printf(m, "%6s %12s %13s %8s %8s\n",
-                       h[0], h[1], h[2], h[3], h[4]);
+       seq_printf(m, "%6s %12s %13s %8s %8s %8s\n",
+                       h[0], h[1], h[2], h[3], h[4], h[5]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager.pools[i];
 
-               seq_printf(m, "%6s %12ld %13ld %8d %8d\n",
+               seq_printf(m, "%6s %12ld %13ld %8d %8d %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages,
-                               p->nlowpages);
+                               p->nlowpages, p->alloc_size);
        }
        return 0;
 }
-- 
1.6.3.3

