On Tue, 2009-12-08 at 15:33 +0100, Jerome Glisse wrote:
> This change allows the driver to pass sorted memory placements,
> from the most preferred placement to the least preferred placement.
> In order to avoid a long function prototype, a structure is
> used to gather memory placement information such as range
> restriction (if you need a buffer to be in a given range).
> The range restriction is determined by fpfn & lpfn, which are
> the first page and last page number between which allocation
> can happen. If those fields are set to 0, ttm will assume the
> buffer can be put anywhere in the address space (thus it
> avoids putting a burden on the driver to always properly
> set those fields).
> 
> This patch also factors out a few functions, such as evicting the
> first entry of the lru list or getting a memory space. This avoids
> code duplication.
> 
> V2: Change API to use placement flags and array instead
>     of packing placement order into a quadword.
> V3: Make sure we set the appropriate mem.placement flag
>     when validating or allocating memory space.
A way to pass fpfn/lpfn to ttm_buffer_object_{init,create}() would be
really useful too.  Perhaps passing a struct ttm_placement rather than
flags to these functions?

Ben.
> 
> Signed-off-by: Jerome Glisse <jgli...@redhat.com>
> ---
>  drivers/gpu/drm/ttm/ttm_bo.c    |  463 
> +++++++++++++++++++--------------------
>  include/drm/ttm/ttm_bo_api.h    |   42 +++-
>  include/drm/ttm/ttm_bo_driver.h |   20 +--
>  3 files changed, 256 insertions(+), 269 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 87c0625..9476bde 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -27,6 +27,14 @@
>  /*
>   * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
>   */
> +/* Notes:
> + *
> + * We store bo pointer in drm_mm_node struct so we know which bo own a
> + * specific node. There is no protection on the pointer, thus to make
> + * sure things don't go berserk you have to access this pointer while
> + * holding the global lru lock and make sure anytime you free a node you
> + * reset the pointer to NULL.
> + */
>  
>  #include "ttm/ttm_module.h"
>  #include "ttm/ttm_bo_driver.h"
> @@ -247,7 +255,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
>  /*
>   * Call bo->mutex locked.
>   */
> -
>  static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
>  {
>       struct ttm_bo_device *bdev = bo->bdev;
> @@ -328,14 +335,8 @@ static int ttm_bo_handle_move_mem(struct 
> ttm_buffer_object *bo,
>               }
>  
>               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
> -
> -                     struct ttm_mem_reg *old_mem = &bo->mem;
> -                     uint32_t save_flags = old_mem->placement;
> -
> -                     *old_mem = *mem;
> +                     bo->mem = *mem;
>                       mem->mm_node = NULL;
> -                     ttm_flag_masked(&save_flags, mem->placement,
> -                                     TTM_PL_MASK_MEMTYPE);
>                       goto moved;
>               }
>  
> @@ -418,6 +419,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object 
> *bo, bool remove_all)
>                       kref_put(&bo->list_kref, ttm_bo_ref_bug);
>               }
>               if (bo->mem.mm_node) {
> +                     bo->mem.mm_node->private = NULL;
>                       drm_mm_put_block(bo->mem.mm_node);
>                       bo->mem.mm_node = NULL;
>               }
> @@ -554,17 +556,14 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
>  }
>  EXPORT_SYMBOL(ttm_bo_unref);
>  
> -static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
> -                     bool interruptible, bool no_wait)
> +static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
> +                     bool no_wait)
>  {
> -     int ret = 0;
>       struct ttm_bo_device *bdev = bo->bdev;
>       struct ttm_bo_global *glob = bo->glob;
>       struct ttm_mem_reg evict_mem;
> -     uint32_t proposed_placement;
> -
> -     if (bo->mem.mem_type != mem_type)
> -             goto out;
> +     struct ttm_placement placement;
> +     int ret = 0;
>  
>       spin_lock(&bo->lock);
>       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
> @@ -584,14 +583,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, 
> unsigned mem_type,
>       evict_mem = bo->mem;
>       evict_mem.mm_node = NULL;
>  
> -     proposed_placement = bdev->driver->evict_flags(bo);
> -
> -     ret = ttm_bo_mem_space(bo, proposed_placement,
> -                            &evict_mem, interruptible, no_wait);
> -     if (unlikely(ret != 0 && ret != -ERESTART))
> -             ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
> -                                    &evict_mem, interruptible, no_wait);
> -
> +     bdev->driver->evict_flags(bo, &placement);
> +     ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
> +                             no_wait);
>       if (ret) {
>               if (ret != -ERESTART)
>                       printk(KERN_ERR TTM_PFX
> @@ -605,95 +599,117 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, 
> unsigned mem_type,
>       if (ret) {
>               if (ret != -ERESTART)
>                       printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
> +             spin_lock(&glob->lru_lock);
> +             if (evict_mem.mm_node) {
> +                     evict_mem.mm_node->private = NULL;
> +                     drm_mm_put_block(evict_mem.mm_node);
> +                     evict_mem.mm_node = NULL;
> +             }
> +             spin_unlock(&glob->lru_lock);
>               goto out;
>       }
> +     bo->evicted = true;
> +out:
> +     return ret;
> +}
> +
> +static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
> +                             uint32_t mem_type,
> +                             bool interruptible, bool no_wait)
> +{
> +     struct ttm_bo_global *glob = bdev->glob;
> +     struct ttm_mem_type_manager *man = &bdev->man[mem_type];
> +     struct ttm_buffer_object *bo;
> +     int ret, put_count = 0;
>  
>       spin_lock(&glob->lru_lock);
> -     if (evict_mem.mm_node) {
> -             drm_mm_put_block(evict_mem.mm_node);
> -             evict_mem.mm_node = NULL;
> -     }
> +     bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
> +     kref_get(&bo->list_kref);
> +     ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
> +     if (likely(ret == 0))
> +             put_count = ttm_bo_del_from_lru(bo);
>       spin_unlock(&glob->lru_lock);
> -     bo->evicted = true;
> -out:
> +     if (unlikely(ret != 0))
> +             return ret;
> +     while (put_count--)
> +             kref_put(&bo->list_kref, ttm_bo_ref_bug);
> +     ret = ttm_bo_evict(bo, interruptible, no_wait);
> +     ttm_bo_unreserve(bo);
> +     kref_put(&bo->list_kref, ttm_bo_release_list);
>       return ret;
>  }
>  
> +static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
> +                             struct ttm_mem_type_manager *man,
> +                             struct ttm_placement *placement,
> +                             struct ttm_mem_reg *mem,
> +                             struct drm_mm_node **node)
> +{
> +     struct ttm_bo_global *glob = bo->glob;
> +     unsigned long lpfn;
> +     int ret;
> +
> +     lpfn = placement->lpfn;
> +     if (!lpfn)
> +             lpfn = man->size;
> +     *node = NULL;
> +     do {
> +             ret = drm_mm_pre_get(&man->manager);
> +             if (unlikely(ret))
> +                     return ret;
> +
> +             spin_lock(&glob->lru_lock);
> +             *node = drm_mm_search_free_in_range(&man->manager,
> +                                     mem->num_pages, mem->page_alignment,
> +                                     placement->fpfn, lpfn, 1);
> +             if (unlikely(*node == NULL)) {
> +                     spin_unlock(&glob->lru_lock);
> +                     return 0;
> +             }
> +             *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
> +                                                     mem->page_alignment,
> +                                                     placement->fpfn,
> +                                                     lpfn);
> +             spin_unlock(&glob->lru_lock);
> +     } while (*node == NULL);
> +     return 0;
> +}
> +
>  /**
>   * Repeatedly evict memory from the LRU for @mem_type until we create enough
>   * space, or we've evicted everything and there isn't enough space.
>   */
> -static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
> -                               struct ttm_mem_reg *mem,
> -                               uint32_t mem_type,
> -                               bool interruptible, bool no_wait)
> +static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
> +                                     uint32_t mem_type,
> +                                     struct ttm_placement *placement,
> +                                     struct ttm_mem_reg *mem,
> +                                     bool interruptible, bool no_wait)
>  {
> +     struct ttm_bo_device *bdev = bo->bdev;
>       struct ttm_bo_global *glob = bdev->glob;
> -     struct drm_mm_node *node;
> -     struct ttm_buffer_object *entry;
>       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
> -     struct list_head *lru;
> -     unsigned long num_pages = mem->num_pages;
> -     int put_count = 0;
> +     struct drm_mm_node *node;
>       int ret;
>  
> -retry_pre_get:
> -     ret = drm_mm_pre_get(&man->manager);
> -     if (unlikely(ret != 0))
> -             return ret;
> -
> -     spin_lock(&glob->lru_lock);
>       do {
> -             node = drm_mm_search_free(&man->manager, num_pages,
> -                                       mem->page_alignment, 1);
> +             ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
> +             if (unlikely(ret != 0))
> +                     return ret;
>               if (node)
>                       break;
> -
> -             lru = &man->lru;
> -             if (list_empty(lru))
> +             spin_lock(&glob->lru_lock);
> +             if (list_empty(&man->lru)) {
> +                     spin_unlock(&glob->lru_lock);
>                       break;
> -
> -             entry = list_first_entry(lru, struct ttm_buffer_object, lru);
> -             kref_get(&entry->list_kref);
> -
> -             ret =
> -                 ttm_bo_reserve_locked(entry, interruptible, no_wait,
> -                                       false, 0);
> -
> -             if (likely(ret == 0))
> -                     put_count = ttm_bo_del_from_lru(entry);
> -
> +             }
>               spin_unlock(&glob->lru_lock);
> -
> +             ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
> +                                             no_wait);
>               if (unlikely(ret != 0))
>                       return ret;
> -
> -             while (put_count--)
> -                     kref_put(&entry->list_kref, ttm_bo_ref_bug);
> -
> -             ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
> -
> -             ttm_bo_unreserve(entry);
> -
> -             kref_put(&entry->list_kref, ttm_bo_release_list);
> -             if (ret)
> -                     return ret;
> -
> -             spin_lock(&glob->lru_lock);
>       } while (1);
> -
> -     if (!node) {
> -             spin_unlock(&glob->lru_lock);
> +     if (node == NULL)
>               return -ENOMEM;
> -     }
> -
> -     node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
> -     if (unlikely(!node)) {
> -             spin_unlock(&glob->lru_lock);
> -             goto retry_pre_get;
> -     }
> -
> -     spin_unlock(&glob->lru_lock);
>       mem->mm_node = node;
>       mem->mem_type = mem_type;
>       return 0;
> @@ -724,7 +740,6 @@ static uint32_t ttm_bo_select_caching(struct 
> ttm_mem_type_manager *man,
>       return result;
>  }
>  
> -
>  static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
>                                bool disallow_fixed,
>                                uint32_t mem_type,
> @@ -748,6 +763,18 @@ static bool ttm_bo_mt_compatible(struct 
> ttm_mem_type_manager *man,
>       return true;
>  }
>  
> +static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
> +{
> +     int i;
> +
> +     for (i = 0; i <= TTM_PL_PRIV5; i++)
> +             if (flags & (1 << i)) {
> +                     *mem_type = i;
> +                     return 0;
> +             }
> +     return -EINVAL;
> +}
> +
>  /**
>   * Creates space for memory region @mem according to its type.
>   *
> @@ -757,66 +784,55 @@ static bool ttm_bo_mt_compatible(struct 
> ttm_mem_type_manager *man,
>   * space.
>   */
>  int ttm_bo_mem_space(struct ttm_buffer_object *bo,
> -                  uint32_t proposed_placement,
> -                  struct ttm_mem_reg *mem,
> -                  bool interruptible, bool no_wait)
> +                     struct ttm_placement *placement,
> +                     struct ttm_mem_reg *mem,
> +                     bool interruptible, bool no_wait)
>  {
>       struct ttm_bo_device *bdev = bo->bdev;
> -     struct ttm_bo_global *glob = bo->glob;
>       struct ttm_mem_type_manager *man;
> -
> -     uint32_t num_prios = bdev->driver->num_mem_type_prio;
> -     const uint32_t *prios = bdev->driver->mem_type_prio;
> -     uint32_t i;
>       uint32_t mem_type = TTM_PL_SYSTEM;
>       uint32_t cur_flags = 0;
>       bool type_found = false;
>       bool type_ok = false;
>       bool has_eagain = false;
>       struct drm_mm_node *node = NULL;
> -     int ret;
> +     int i, ret;
>  
>       mem->mm_node = NULL;
> -     for (i = 0; i < num_prios; ++i) {
> -             mem_type = prios[i];
> +     for (i = 0; i <= placement->num_placement; ++i) {
> +             ret = ttm_mem_type_from_flags(placement->placement[i],
> +                                             &mem_type);
> +             if (ret)
> +                     return ret;
>               man = &bdev->man[mem_type];
>  
>               type_ok = ttm_bo_mt_compatible(man,
> -                                            bo->type == ttm_bo_type_user,
> -                                            mem_type, proposed_placement,
> -                                            &cur_flags);
> +                                             bo->type == ttm_bo_type_user,
> +                                             mem_type,
> +                                             placement->placement[i],
> +                                             &cur_flags);
>  
>               if (!type_ok)
>                       continue;
>  
>               cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
>                                                 cur_flags);
> +             /*
> +              * Use the access and other non-mapping-related flag bits from
> +              * the memory placement flags to the current flags
> +              */
> +             ttm_flag_masked(&cur_flags, placement->placement[i],
> +                             ~TTM_PL_MASK_MEMTYPE);
>  
>               if (mem_type == TTM_PL_SYSTEM)
>                       break;
>  
>               if (man->has_type && man->use_type) {
>                       type_found = true;
> -                     do {
> -                             ret = drm_mm_pre_get(&man->manager);
> -                             if (unlikely(ret))
> -                                     return ret;
> -
> -                             spin_lock(&glob->lru_lock);
> -                             node = drm_mm_search_free(&man->manager,
> -                                                       mem->num_pages,
> -                                                       mem->page_alignment,
> -                                                       1);
> -                             if (unlikely(!node)) {
> -                                     spin_unlock(&glob->lru_lock);
> -                                     break;
> -                             }
> -                             node = drm_mm_get_block_atomic(node,
> -                                                            mem->num_pages,
> -                                                            mem->
> -                                                            page_alignment);
> -                             spin_unlock(&glob->lru_lock);
> -                     } while (!node);
> +                     ret = ttm_bo_man_get_node(bo, man, placement, mem,
> +                                                     &node);
> +                     if (unlikely(ret))
> +                             return ret;
>               }
>               if (node)
>                       break;
> @@ -826,43 +842,48 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
>               mem->mm_node = node;
>               mem->mem_type = mem_type;
>               mem->placement = cur_flags;
> +             if (node)
> +                     node->private = bo;
>               return 0;
>       }
>  
>       if (!type_found)
>               return -EINVAL;
>  
> -     num_prios = bdev->driver->num_mem_busy_prio;
> -     prios = bdev->driver->mem_busy_prio;
> -
> -     for (i = 0; i < num_prios; ++i) {
> -             mem_type = prios[i];
> +     for (i = 0; i <= placement->num_busy_placement; ++i) {
> +             ret = ttm_mem_type_from_flags(placement->placement[i],
> +                                             &mem_type);
> +             if (ret)
> +                     return ret;
>               man = &bdev->man[mem_type];
> -
>               if (!man->has_type)
>                       continue;
> -
>               if (!ttm_bo_mt_compatible(man,
> -                                       bo->type == ttm_bo_type_user,
> -                                       mem_type,
> -                                       proposed_placement, &cur_flags))
> +                                             bo->type == ttm_bo_type_user,
> +                                             mem_type,
> +                                             placement->placement[i],
> +                                             &cur_flags))
>                       continue;
>  
>               cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
>                                                 cur_flags);
> +             /*
> +              * Use the access and other non-mapping-related flag bits from
> +              * the memory placement flags to the current flags
> +              */
> +             ttm_flag_masked(&cur_flags, placement->placement[i],
> +                             ~TTM_PL_MASK_MEMTYPE);
>  
> -             ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
> -                                          interruptible, no_wait);
> -
> +             ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
> +                                             interruptible, no_wait);
>               if (ret == 0 && mem->mm_node) {
>                       mem->placement = cur_flags;
> +                     mem->mm_node->private = bo;
>                       return 0;
>               }
> -
>               if (ret == -ERESTART)
>                       has_eagain = true;
>       }
> -
>       ret = (has_eagain) ? -ERESTART : -ENOMEM;
>       return ret;
>  }
> @@ -885,8 +906,8 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool 
> no_wait)
>  }
>  
>  int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
> -                    uint32_t proposed_placement,
> -                    bool interruptible, bool no_wait)
> +                     struct ttm_placement *placement,
> +                     bool interruptible, bool no_wait)
>  {
>       struct ttm_bo_global *glob = bo->glob;
>       int ret = 0;
> @@ -899,101 +920,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
>        * Have the driver move function wait for idle when necessary,
>        * instead of doing it here.
>        */
> -
>       spin_lock(&bo->lock);
>       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
>       spin_unlock(&bo->lock);
> -
>       if (ret)
>               return ret;
> -
>       mem.num_pages = bo->num_pages;
>       mem.size = mem.num_pages << PAGE_SHIFT;
>       mem.page_alignment = bo->mem.page_alignment;
> -
>       /*
>        * Determine where to move the buffer.
>        */
> -
> -     ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
> -                            interruptible, no_wait);
> +     ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
>       if (ret)
>               goto out_unlock;
> -
>       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
> -
>  out_unlock:
>       if (ret && mem.mm_node) {
>               spin_lock(&glob->lru_lock);
> +             mem.mm_node->private = NULL;
>               drm_mm_put_block(mem.mm_node);
>               spin_unlock(&glob->lru_lock);
>       }
>       return ret;
>  }
>  
> -static int ttm_bo_mem_compat(uint32_t proposed_placement,
> +static int ttm_bo_mem_compat(struct ttm_placement *placement,
>                            struct ttm_mem_reg *mem)
>  {
> -     if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
> -             return 0;
> -     if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
> -             return 0;
> -
> -     return 1;
> +     int i;
> +
> +     for (i = 0; i < placement->num_placement; i++) {
> +             if ((placement->placement[i] & mem->placement &
> +                     TTM_PL_MASK_CACHING) &&
> +                     (placement->placement[i] & mem->placement &
> +                     TTM_PL_MASK_MEM))
> +                     return i;
> +     }
> +     return -1;
>  }
>  
>  int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
> -                            uint32_t proposed_placement,
> -                            bool interruptible, bool no_wait)
> +                             struct ttm_placement *placement,
> +                             bool interruptible, bool no_wait)
>  {
>       int ret;
>  
>       BUG_ON(!atomic_read(&bo->reserved));
> -     bo->proposed_placement = proposed_placement;
> -
> -     TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
> -               (unsigned long)proposed_placement,
> -               (unsigned long)bo->mem.placement);
> -
> +     /* Check that range is valid */
> +     if (placement->lpfn || placement->fpfn)
> +             if (placement->fpfn > placement->lpfn ||
> +                     (placement->lpfn - placement->fpfn) < bo->num_pages)
> +                     return -EINVAL;
>       /*
>        * Check whether we need to move buffer.
>        */
> -
> -     if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
> -             ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
> -                                      interruptible, no_wait);
> -             if (ret) {
> -                     if (ret != -ERESTART)
> -                             printk(KERN_ERR TTM_PFX
> -                                    "Failed moving buffer. "
> -                                    "Proposed placement 0x%08x\n",
> -                                    bo->proposed_placement);
> -                     if (ret == -ENOMEM)
> -                             printk(KERN_ERR TTM_PFX
> -                                    "Out of aperture space or "
> -                                    "DRM memory quota.\n");
> +     ret = ttm_bo_mem_compat(placement, &bo->mem);
> +     if (ret < 0) {
> +             ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
> +             if (ret)
>                       return ret;
> -             }
> +     } else {
> +             /*
> +              * Use the access and other non-mapping-related flag bits from
> +              * the compatible memory placement flags to the active flags
> +              */
> +             ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
> +                             ~TTM_PL_MASK_MEMTYPE);
>       }
> -
>       /*
>        * We might need to add a TTM.
>        */
> -
>       if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
>               ret = ttm_bo_add_ttm(bo, true);
>               if (ret)
>                       return ret;
>       }
> -     /*
> -      * Validation has succeeded, move the access and other
> -      * non-mapping-related flag bits from the proposed flags to
> -      * the active flags
> -      */
> -
> -     ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
> -                     ~TTM_PL_MASK_MEMTYPE);
> -
>       return 0;
>  }
>  EXPORT_SYMBOL(ttm_buffer_object_validate);
> @@ -1041,8 +1043,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
>                          size_t acc_size,
>                          void (*destroy) (struct ttm_buffer_object *))
>  {
> -     int ret = 0;
> +     int i, c, ret = 0;
>       unsigned long num_pages;
> +     uint32_t placements[8];
> +     struct ttm_placement placement;
>  
>       size += buffer_start & ~PAGE_MASK;
>       num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
> @@ -1099,7 +1103,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
>                       goto out_err;
>       }
>  
> -     ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
> +     placement.fpfn = 0;
> +     placement.lpfn = 0;
> +     for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
> +             if (flags & (1 << i))
> +                     placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
> +     placement.placement = placements;
> +     placement.num_placement = c;
> +     placement.busy_placement = placements;
> +     placement.num_busy_placement = c;
> +     ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
>       if (ret)
>               goto out_err;
>  
> @@ -1134,8 +1147,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
>                            struct ttm_buffer_object **p_bo)
>  {
>       struct ttm_buffer_object *bo;
> -     int ret;
>       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
> +     int ret;
>  
>       size_t acc_size =
>           ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
> @@ -1160,66 +1173,32 @@ int ttm_buffer_object_create(struct ttm_bo_device 
> *bdev,
>       return ret;
>  }
>  
> -static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
> -                          uint32_t mem_type, bool allow_errors)
> -{
> -     int ret;
> -
> -     spin_lock(&bo->lock);
> -     ret = ttm_bo_wait(bo, false, false, false);
> -     spin_unlock(&bo->lock);
> -
> -     if (ret && allow_errors)
> -             goto out;
> -
> -     if (bo->mem.mem_type == mem_type)
> -             ret = ttm_bo_evict(bo, mem_type, false, false);
> -
> -     if (ret) {
> -             if (allow_errors) {
> -                     goto out;
> -             } else {
> -                     ret = 0;
> -                     printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
> -             }
> -     }
> -
> -out:
> -     return ret;
> -}
> -
>  static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
> -                                struct list_head *head,
> -                                unsigned mem_type, bool allow_errors)
> +                                     unsigned mem_type, bool allow_errors)
>  {
> +     struct ttm_mem_type_manager *man = &bdev->man[mem_type];
>       struct ttm_bo_global *glob = bdev->glob;
> -     struct ttm_buffer_object *entry;
>       int ret;
> -     int put_count;
>  
>       /*
>        * Can't use standard list traversal since we're unlocking.
>        */
>  
>       spin_lock(&glob->lru_lock);
> -
> -     while (!list_empty(head)) {
> -             entry = list_first_entry(head, struct ttm_buffer_object, lru);
> -             kref_get(&entry->list_kref);
> -             ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
> -             put_count = ttm_bo_del_from_lru(entry);
> +     while (!list_empty(&man->lru)) {
>               spin_unlock(&glob->lru_lock);
> -             while (put_count--)
> -                     kref_put(&entry->list_kref, ttm_bo_ref_bug);
> -             BUG_ON(ret);
> -             ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
> -             ttm_bo_unreserve(entry);
> -             kref_put(&entry->list_kref, ttm_bo_release_list);
> +             ret = ttm_mem_evict_first(bdev, mem_type, false, false);
> +             if (ret) {
> +                     if (allow_errors) {
> +                             return ret;
> +                     } else {
> +                             printk(KERN_ERR TTM_PFX
> +                                     "Cleanup eviction failed\n");
> +                     }
> +             }
>               spin_lock(&glob->lru_lock);
>       }
> -
>       spin_unlock(&glob->lru_lock);
> -
>       return 0;
>  }
>  
> @@ -1246,7 +1225,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, 
> unsigned mem_type)
>  
>       ret = 0;
>       if (mem_type > 0) {
> -             ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
> +             ttm_bo_force_list_clean(bdev, mem_type, false);
>  
>               spin_lock(&glob->lru_lock);
>               if (drm_mm_clean(&man->manager))
> @@ -1279,12 +1258,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, 
> unsigned mem_type)
>               return 0;
>       }
>  
> -     return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
> +     return ttm_bo_force_list_clean(bdev, mem_type, true);
>  }
>  EXPORT_SYMBOL(ttm_bo_evict_mm);
>  
>  int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
> -                unsigned long p_offset, unsigned long p_size)
> +                     unsigned long p_size)
>  {
>       int ret = -EINVAL;
>       struct ttm_mem_type_manager *man;
> @@ -1314,7 +1293,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned 
> type,
>                              type);
>                       return ret;
>               }
> -             ret = drm_mm_init(&man->manager, p_offset, p_size);
> +             ret = drm_mm_init(&man->manager, 0, p_size);
>               if (ret)
>                       return ret;
>       }
> @@ -1463,7 +1442,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
>        * Initialize the system memory buffer type.
>        * Other types need to be driver / IOCTL initialized.
>        */
> -     ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
> +     ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
>       if (unlikely(ret != 0))
>               goto out_no_sys;
>  
> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
> index 4911461..2f7f56d 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -44,6 +44,29 @@ struct ttm_bo_device;
>  
>  struct drm_mm_node;
>  
> +
> +/**
> + * struct ttm_placement
> + *
> + * @fpfn:            first valid page frame number to put the object
> + * @lpfn:            last valid page frame number to put the object
> + * @num_placement:   number of preferred placements
> + * @placement:               preferred placements
> + * @num_busy_placement:      number of preferred placements when the 
> buffer needs to be evicted
> + * @busy_placement:  preferred placements when the buffer needs to be evicted
> + *
> + * Structure indicating the placement you request for an object.
> + */
> +struct ttm_placement {
> +     unsigned        fpfn;
> +     unsigned        lpfn;
> +     unsigned        num_placement;
> +     const uint32_t  *placement;
> +     unsigned        num_busy_placement;
> +     const uint32_t  *busy_placement;
> +};
> +
> +
>  /**
>   * struct ttm_mem_reg
>   *
> @@ -109,10 +132,6 @@ struct ttm_tt;
>   * the object is destroyed.
>   * @event_queue: Queue for processes waiting on buffer object status change.
>   * @lock: spinlock protecting mostly synchronization members.
> - * @proposed_placement: Proposed placement for the buffer. Changed only by 
> the
> - * creator prior to validation as opposed to bo->mem.proposed_flags which is
> - * changed by the implementation prior to a buffer move if it wants to 
> outsmart
> - * the buffer creator / user. This latter happens, for example, at eviction.
>   * @mem: structure describing current placement.
>   * @persistant_swap_storage: Usually the swap storage is deleted for buffers
>   * pinned in physical memory. If this behaviour is not desired, this member
> @@ -177,7 +196,6 @@ struct ttm_buffer_object {
>        * Members protected by the bo::reserved lock.
>        */
>  
> -     uint32_t proposed_placement;
>       struct ttm_mem_reg mem;
>       struct file *persistant_swap_storage;
>       struct ttm_tt *ttm;
> @@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, 
> bool lazy,
>   * ttm_buffer_object_validate
>   *
>   * @bo: The buffer object.
> - * @proposed_placement: Proposed_placement for the buffer object.
> + * @placement: Proposed placement for the buffer object.
>   * @interruptible: Sleep interruptible if sleeping.
>   * @no_wait: Return immediately if the buffer is busy.
>   *
>   * Changes placement and caching policy of the buffer object
> - * according to bo::proposed_flags.
> + * according to the proposed placement.
>   * Returns
> - * -EINVAL on invalid proposed_flags.
> + * -EINVAL on invalid proposed placement.
>   * -ENOMEM on out-of-memory condition.
>   * -EBUSY if no_wait is true and buffer busy.
>   * -ERESTART if interrupted by a signal.
>   */
>  extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
> -                                   uint32_t proposed_placement,
> -                                   bool interruptible, bool no_wait);
> +                                     struct ttm_placement *placement,
> +                                     bool interruptible, bool no_wait);
> +
>  /**
>   * ttm_bo_unref
>   *
> @@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct 
> ttm_buffer_object *bo,
>   *
>   * @bdev: Pointer to a ttm_bo_device struct.
>   * @mem_type: The memory type.
> - * @p_offset: offset for managed area in pages.
>   * @p_size: size managed area in pages.
>   *
>   * Initialize a manager for a given memory type.
> @@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct 
> ttm_buffer_object *bo,
>   */
>  
>  extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
> -                       unsigned long p_offset, unsigned long p_size);
> +                             unsigned long p_size);
>  /**
>   * ttm_bo_clean_mm
>   *
> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
> index e8cd6d2..1dd1dbc 100644
> --- a/include/drm/ttm/ttm_bo_driver.h
> +++ b/include/drm/ttm/ttm_bo_driver.h
> @@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
>  /**
>   * struct ttm_bo_driver
>   *
> - * @mem_type_prio: Priority array of memory types to place a buffer object in
> - * if it fits without evicting buffers from any of these memory types.
> - * @mem_busy_prio: Priority array of memory types to place a buffer object in
> - * if it needs to evict buffers to make room.
> - * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
> - * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
>   * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
>   * @invalidate_caches: Callback to invalidate read caches when a buffer 
> object
>   * has been evicted.
> @@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
>   */
>  
>  struct ttm_bo_driver {
> -     const uint32_t *mem_type_prio;
> -     const uint32_t *mem_busy_prio;
> -     uint32_t num_mem_type_prio;
> -     uint32_t num_mem_busy_prio;
> -
>       /**
>        * struct ttm_bo_driver member create_ttm_backend_entry
>        *
> @@ -306,7 +295,8 @@ struct ttm_bo_driver {
>        * finished, they'll end up in bo->mem.flags
>        */
>  
> -      uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
> +      void(*evict_flags) (struct ttm_buffer_object *bo,
> +                             struct ttm_placement *placement);
>       /**
>        * struct ttm_bo_driver member move:
>        *
> @@ -642,9 +632,9 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
>   * -ERESTART: An interruptible sleep was interrupted by a signal.
>   */
>  extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
> -                         uint32_t proposed_placement,
> -                         struct ttm_mem_reg *mem,
> -                         bool interruptible, bool no_wait);
> +                             struct ttm_placement *placement,
> +                             struct ttm_mem_reg *mem,
> +                             bool interruptible, bool no_wait);
>  /**
>   * ttm_bo_wait_for_cpu
>   *



------------------------------------------------------------------------------
Return on Information:
Google Enterprise Search pays you back
Get the facts.
http://p.sf.net/sfu/google-dev2dev
--
_______________________________________________
Dri-devel mailing list
Dri-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/dri-devel

Reply via email to