On 11/08/2011 12:40 AM, j.gli...@gmail.com wrote:
From: Jerome Glisse <jgli...@redhat.com>

Use the ttm_tt page pointer array for page allocation, and move the
list-to-array unwinding into the page allocation functions.

V2: split the fix that uses ttm put page into a separate patch;
properly fill the pages array when TTM_PAGE_FLAG_ZERO_ALLOC is not set
V3: added back the page_count() == 1 check when freeing a page
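
For reference, the interface moves from a list_head based API to a plain
page pointer array; a minimal before/after sketch of a single-page caller
(mirroring the __ttm_tt_get_page hunk below, error handling omitted):

        /* before: pages come back on a list the caller has to unwind */
        struct list_head h;
        INIT_LIST_HEAD(&h);
        ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
                            &ttm->dma_address[index]);
        p = list_first_entry(&h, struct page, lru);

        /* after: pages are written straight into the ttm_tt page array */
        ret = ttm_get_pages(&ttm->pages[index], 1, ttm->page_flags,
                            ttm->caching_state, &ttm->dma_address[index]);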

NAK, this patch introduces a DoS vulnerability: it allows an arbitrary number of pages to be allocated *before* they are accounted. Currently TTM only allows a "small" (page-sized) amount of memory to be allocated before accounting. So page allocation must be interleaved with accounting on a per-page basis, unless we can figure out a way to do the accounting *before* allocating the pages.
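
For illustration, a rough sketch of the per-page interleaving described
above, reusing the existing single-page accounting helpers (the unwind
label and loop below are hypothetical, not code from this patch):

        for (i = 0; i < npages; i++) {
                pages[i] = alloc_page(gfp_flags);
                if (!pages[i]) {
                        ret = -ENOMEM;
                        goto out_unwind;
                }
                /* account each page right after allocating it, so at most
                 * one page is ever outstanding without accounting */
                ret = ttm_mem_global_alloc_page(glob, pages[i], false, false);
                if (ret) {
                        __free_page(pages[i]);
                        pages[i] = NULL;
                        goto out_unwind;
                }
        }
        return 0;

out_unwind:
        /* un-account and free pages[0..i-1] */
        while (i--) {
                ttm_mem_global_free_page(glob, pages[i]);
                __free_page(pages[i]);
                pages[i] = NULL;
        }
        return ret;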

/Thomas


Signed-off-by: Jerome Glisse <jgli...@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
  drivers/gpu/drm/ttm/ttm_memory.c     |   44 +++++++++++------
  drivers/gpu/drm/ttm/ttm_page_alloc.c |   90 ++++++++++++++++++++--------------
  drivers/gpu/drm/ttm/ttm_tt.c         |   61 ++++++++---------------
  include/drm/ttm/ttm_memory.h         |   11 ++--
  include/drm/ttm/ttm_page_alloc.h     |   17 +++---
  5 files changed, 115 insertions(+), 108 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e70ddd8..3a3a58b 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -543,41 +543,53 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
  }
  EXPORT_SYMBOL(ttm_mem_global_alloc);

-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-                             struct page *page,
-                             bool no_wait, bool interruptible)
+int ttm_mem_global_alloc_pages(struct ttm_mem_global *glob,
+                              struct page **pages,
+                              unsigned npages,
+                              bool no_wait, bool interruptible)
  {

        struct ttm_mem_zone *zone = NULL;
+       unsigned i;
+       int r;

        /**
         * Page allocations may be registed in a single zone
         * only if highmem or !dma32.
         */
-
+       for (i = 0; i < npages; i++) {
  #ifdef CONFIG_HIGHMEM
-       if (PageHighMem(page) && glob->zone_highmem != NULL)
-               zone = glob->zone_highmem;
+               if (PageHighMem(pages[i]) && glob->zone_highmem != NULL)
+                       zone = glob->zone_highmem;
  #else
-       if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-               zone = glob->zone_kernel;
+               if (glob->zone_dma32 && page_to_pfn(pages[i]) > 0x00100000UL)
+                       zone = glob->zone_kernel;
  #endif
-       return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
-                                        interruptible);
+               r = ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+                                             interruptible);
+               if (r) {
+                       return r;
+               }
+       }
+       return 0;
  }

-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+void ttm_mem_global_free_pages(struct ttm_mem_global *glob,
+                              struct page **pages, unsigned npages)
  {
        struct ttm_mem_zone *zone = NULL;
+       unsigned i;

+       for (i = 0; i < npages; i++) {
  #ifdef CONFIG_HIGHMEM
-       if (PageHighMem(page) && glob->zone_highmem != NULL)
-               zone = glob->zone_highmem;
+               if (PageHighMem(pages[i]) && glob->zone_highmem != NULL)
+                       zone = glob->zone_highmem;
  #else
-       if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-               zone = glob->zone_kernel;
+               if (glob->zone_dma32 && page_to_pfn(pages[i]) > 0x00100000UL)
+                       zone = glob->zone_kernel;
  #endif
-       ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+               ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+       }
  }


diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 727e93d..c4f18b9 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
   * @return count of pages still required to fulfill the request.
   */
  static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-               struct list_head *pages, int ttm_flags,
-               enum ttm_caching_state cstate, unsigned count)
+                                       struct list_head *pages,
+                                       int ttm_flags,
+                                       enum ttm_caching_state cstate,
+                                       unsigned count)
  {
        unsigned long irq_flags;
        struct list_head *p;
@@ -664,13 +666,14 @@ out:
   * On success pages list will hold count number of correctly
   * cached pages.
   */
-int ttm_get_pages(struct list_head *pages, int flags,
-                 enum ttm_caching_state cstate, unsigned count,
-                 dma_addr_t *dma_address)
+int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+                 enum ttm_caching_state cstate, dma_addr_t *dma_address)
  {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
+       struct list_head plist;
        gfp_t gfp_flags = GFP_USER;
+       unsigned count = 0;
        int r;

        /* set zero flag for page allocation if required */
@@ -684,94 +687,107 @@ int ttm_get_pages(struct list_head *pages, int flags,
                else
                        gfp_flags |= GFP_HIGHUSER;

-               for (r = 0; r < count; ++r) {
-                       p = alloc_page(gfp_flags);
-                       if (!p) {
-
+               for (count = 0; count < npages; ++count) {
+                       pages[count] = alloc_page(gfp_flags);
+                       if (pages[count] == NULL) {
                                printk(KERN_ERR TTM_PFX
                                       "Unable to allocate page.");
                                return -ENOMEM;
                        }
-
-                       list_add(&p->lru, pages);
                }
                return 0;
        }

-
        /* combine zero flag to pool flags */
        gfp_flags |= pool->gfp_flags;

        /* First we take pages from the pool */
-       count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+       INIT_LIST_HEAD(&plist);
+       npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);

        /* clear the pages coming from the pool if requested */
+       count = 0;
+       list_for_each_entry(p, &plist, lru) {
+               pages[count++] = p;
+       }
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-               list_for_each_entry(p, pages, lru) {
+               list_for_each_entry(p, &plist, lru) {
                        clear_page(page_address(p));
                }
        }

        /* If pool didn't have enough pages allocate new one. */
-       if (count > 0) {
+       if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
-               r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+               INIT_LIST_HEAD(&plist);
+               r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+               list_for_each_entry(p, &plist, lru) {
+                       pages[count++] = p;
+               }
                if (r) {
                        /* If there is any pages in the list put them back to
                         * the pool. */
                        printk(KERN_ERR TTM_PFX
                               "Failed to allocate extra pages "
                               "for large request.");
-                       ttm_put_pages(pages, 0, flags, cstate, NULL);
+                       ttm_put_pages(pages, count, flags, cstate, NULL);
                        return r;
                }
        }

-
        return 0;
  }

  /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                   enum ttm_caching_state cstate, dma_addr_t *dma_address)
  {
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-       struct page *p, *tmp;
+       unsigned i;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */

-               list_for_each_entry_safe(p, tmp, pages, lru) {
-                       __free_page(p);
+               for (i = 0; i < npages; i++) {
+                       if (pages[i]) {
+                               if (page_count(pages[i]) != 1)
+                                       printk(KERN_ERR TTM_PFX
+                                               "Erroneous page count. "
+                                               "Leaking pages.\n");
+                               __free_page(pages[i]);
+                               pages[i] = NULL;
+                       }
                }
-               /* Make the pages list empty */
-               INIT_LIST_HEAD(pages);
                return;
        }
-       if (page_count == 0) {
-               list_for_each_entry_safe(p, tmp, pages, lru) {
-                       ++page_count;
-               }
-       }

        spin_lock_irqsave(&pool->lock, irq_flags);
-       list_splice_init(pages, &pool->list);
-       pool->npages += page_count;
+       for (i = 0; i < npages; i++) {
+               if (pages[i]) {
+                       if (page_count(pages[i]) != 1)
+                               printk(KERN_ERR TTM_PFX
+                                       "Erroneous page count. "
+                                       "Leaking pages.\n");
+                       list_add_tail(&pages[i]->lru, &pool->list);
+                       pages[i] = NULL;
+                       pool->npages++;
+               }
+       }
        /* Check that we don't go over the pool limit */
-       page_count = 0;
+       npages = 0;
        if (pool->npages > _manager->options.max_size) {
-               page_count = pool->npages - _manager->options.max_size;
+               npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
-               if (page_count < NUM_PAGES_TO_ALLOC)
-                       page_count = NUM_PAGES_TO_ALLOC;
+               if (npages < NUM_PAGES_TO_ALLOC)
+                       npages = NUM_PAGES_TO_ALLOC;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
-       if (page_count)
-               ttm_page_pool_free(pool, page_count);
+       if (npages)
+               ttm_page_pool_free(pool, npages);
  }

  static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3fb4c6d..2dd45ca 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -65,33 +65,25 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
  static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
  {
        struct page *p;
-       struct list_head h;
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        int ret;

        if (NULL == (p = ttm->pages[index])) {

-               INIT_LIST_HEAD(&h);
-
-               ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+               ret = ttm_get_pages(&ttm->pages[index], 1, ttm->page_flags,
+                                   ttm->caching_state,
                                &ttm->dma_address[index]);
-
                if (ret != 0)
                        return NULL;

-               p = list_first_entry(&h, struct page, lru);
-
-               ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+               ret = ttm_mem_global_alloc_pages(mem_glob, &ttm->pages[index],
+                                                1, false, false);
                if (unlikely(ret != 0))
                        goto out_err;
-
-               ttm->pages[index] = p;
        }
        return p;
  out_err:
-       INIT_LIST_HEAD(&h);
-       list_add(&p->lru, &h);
-       ttm_put_pages(&h, 1, ttm->page_flags,
+       ttm_put_pages(&ttm->pages[index], 1, ttm->page_flags,
                      ttm->caching_state, &ttm->dma_address[index]);
        return NULL;
  }
@@ -110,8 +102,7 @@ struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)

  int ttm_tt_populate(struct ttm_tt *ttm)
  {
-       struct page *page;
-       unsigned long i;
+       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        struct ttm_backend *be;
        int ret;

@@ -126,10 +117,16 @@ int ttm_tt_populate(struct ttm_tt *ttm)

        be = ttm->be;

-       for (i = 0; i < ttm->num_pages; ++i) {
-               page = __ttm_tt_get_page(ttm, i);
-               if (!page)
-                       return -ENOMEM;
+       ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+                           ttm->caching_state, ttm->dma_address);
+       if (ret != 0)
+               return -ENOMEM;
+       ret = ttm_mem_global_alloc_pages(mem_glob, ttm->pages,
+                                        ttm->num_pages, false, false);
+       if (unlikely(ret != 0)) {
+               ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+                             ttm->caching_state, ttm->dma_address);
+               return -ENOMEM;
        }

        be->func->populate(be, ttm->num_pages, ttm->pages,
@@ -242,33 +239,15 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);

  static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
  {
-       int i;
-       unsigned count = 0;
-       struct list_head h;
-       struct page *cur_page;
        struct ttm_backend *be = ttm->be;
-
-       INIT_LIST_HEAD(&h);
+       struct ttm_mem_global *glob = ttm->glob->mem_glob;

        if (be)
                be->func->clear(be);
-       for (i = 0; i < ttm->num_pages; ++i) {

-               cur_page = ttm->pages[i];
-               ttm->pages[i] = NULL;
-               if (cur_page) {
-                       if (page_count(cur_page) != 1)
-                               printk(KERN_ERR TTM_PFX
-                                      "Erroneous page count. "
-                                      "Leaking pages.\n");
-                       ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                cur_page);
-                       list_add(&cur_page->lru, &h);
-                       count++;
-               }
-       }
-       ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-                     ttm->dma_address);
+       ttm_mem_global_free_pages(glob, ttm->pages, ttm->num_pages);
+       ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+                       ttm->caching_state, ttm->dma_address);
        ttm->state = tt_unpopulated;
  }

diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 26c1f78..cee821e 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -150,10 +150,11 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                                bool no_wait, bool interruptible);
  extern void ttm_mem_global_free(struct ttm_mem_global *glob,
                                uint64_t amount);
-extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-                                    struct page *page,
-                                    bool no_wait, bool interruptible);
-extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-                                    struct page *page);
+extern int ttm_mem_global_alloc_pages(struct ttm_mem_global *glob,
+                                     struct page **pages,
+                                     unsigned npages,
+                                     bool no_wait, bool interruptible);
+extern void ttm_mem_global_free_pages(struct ttm_mem_global *glob,
+                                     struct page **pages, unsigned npages);
  extern size_t ttm_round_pot(size_t size);
  #endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 129de12..fffb3bd 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -32,29 +32,28 @@
  /**
   * Get count number of pages from pool to pages list.
   *
- * @pages: head of empty linked list where pages are filled.
+ * @pages: array of pages ptr
+ * @npages: number of pages to allocate.
   * @flags: ttm flags for page allocation.
   * @cstate: ttm caching state for the page.
- * @count: number of pages to allocate.
   * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
   */
-int ttm_get_pages(struct list_head *pages,
+int ttm_get_pages(struct page **pages,
+                 unsigned npages,
                  int flags,
                  enum ttm_caching_state cstate,
-                 unsigned count,
                  dma_addr_t *dma_address);
  /**
   * Put linked list of pages to pool.
   *
- * @pages: list of pages to free.
- * @page_count: number of pages in the list. Zero can be passed for unknown
- * count.
+ * @pages: array of pages ptr
+ * @npages: number of pages to free.
   * @flags: ttm flags for page allocation.
   * @cstate: ttm caching state.
   * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
   */
-void ttm_put_pages(struct list_head *pages,
-                  unsigned page_count,
+void ttm_put_pages(struct page **pages,
+                  unsigned npages,
                   int flags,
                   enum ttm_caching_state cstate,
                   dma_addr_t *dma_address);

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/dri-devel
