Am Sonntag, dem 08.09.2024 um 17:43 +0800 schrieb Sui Jingfeng:
> vunmap() can be used to release a virtual mapping obtained via vmap(),
> while vmap() is used to make a long-duration mapping of multiple
> physical pages into a contiguous virtual address space.
> 
> Untangle the implementation-specific vunmap() operation from the
> etnaviv_gem_xxx_release() functions, so that etnaviv_gem_xxx_release()
> is only responsible for releasing the pages.
> 
> etnaviv_gem_vunmap() is added for driver-internal use cases, where no
> DRM GEM framework is involved.
> 
> Signed-off-by: Sui Jingfeng <sui.jingf...@linux.dev>
> ---
>  drivers/gpu/drm/etnaviv/etnaviv_drv.h       |  1 +
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c       | 38 ++++++++++++++++++++-
>  drivers/gpu/drm/etnaviv/etnaviv_gem.h       |  1 +
>  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 13 ++++---
>  4 files changed, 47 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h 
> b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> index b3eb1662e90c..2eb2ff13f6e8 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> @@ -61,6 +61,7 @@ struct drm_gem_object 
> *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
>  int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
>  void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
>  void *etnaviv_gem_vmap(struct drm_gem_object *obj);
> +void etnaviv_gem_vunmap(struct drm_gem_object *obj);
>  int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
>               struct drm_etnaviv_timespec *timeout);
>  int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index 6bdf72cd9e85..fad23494d08e 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -340,6 +340,25 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
>       return etnaviv_obj->vaddr;
>  }
>  
> +void etnaviv_gem_vunmap(struct drm_gem_object *obj)
> +{
> +     struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +     if (!etnaviv_obj->vaddr)
> +             return;
> +
> +     mutex_lock(&etnaviv_obj->lock);

Either the mutex must extend across the above vaddr check, or the check
needs to be repeated within the mutex section, similar to the
implementation in etnaviv_gem_vmap.

Regards,
Lucas

> +     etnaviv_obj->ops->vunmap(etnaviv_obj);
> +     etnaviv_obj->vaddr = NULL;
> +     mutex_unlock(&etnaviv_obj->lock);
> +}
> +
> +static void etnaviv_gem_object_vunmap(struct drm_gem_object *obj,
> +                                   struct iosys_map *map)
> +{
> +     etnaviv_gem_vunmap(obj);
> +}
> +
>  static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
>  {
>       struct page **pages;
> @@ -471,14 +490,21 @@ void etnaviv_gem_describe_objects(struct 
> etnaviv_drm_private *priv,
>  
>  static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
>  {
> -     vunmap(etnaviv_obj->vaddr);
>       put_pages(etnaviv_obj);
>  }
>  
> +static void etnaviv_gem_shmem_vunmap(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +     lockdep_assert_held(&etnaviv_obj->lock);
> +
> +     vunmap(etnaviv_obj->vaddr);
> +}
> +
>  static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
>       .get_pages = etnaviv_gem_shmem_get_pages,
>       .release = etnaviv_gem_shmem_release,
>       .vmap = etnaviv_gem_vmap_impl,
> +     .vunmap = etnaviv_gem_shmem_vunmap,
>       .mmap = etnaviv_gem_mmap_obj,
>  };
>  
> @@ -508,6 +534,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
>               kfree(mapping);
>       }
>  
> +     etnaviv_obj->ops->vunmap(etnaviv_obj);
>       etnaviv_obj->ops->release(etnaviv_obj);
>       drm_gem_object_release(obj);
>  
> @@ -569,6 +596,7 @@ static const struct drm_gem_object_funcs 
> etnaviv_gem_object_funcs = {
>       .unpin = etnaviv_gem_prime_unpin,
>       .get_sg_table = etnaviv_gem_prime_get_sg_table,
>       .vmap = etnaviv_gem_prime_vmap,
> +     .vunmap = etnaviv_gem_object_vunmap,
>       .mmap = etnaviv_gem_mmap,
>       .vm_ops = &vm_ops,
>  };
> @@ -723,6 +751,13 @@ static void etnaviv_gem_userptr_release(struct 
> etnaviv_gem_object *etnaviv_obj)
>       }
>  }
>  
> +static void etnaviv_gem_userptr_vunmap(struct etnaviv_gem_object 
> *etnaviv_obj)
> +{
> +     lockdep_assert_held(&etnaviv_obj->lock);
> +
> +     vunmap(etnaviv_obj->vaddr);
> +}
> +
>  static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object 
> *etnaviv_obj,
>               struct vm_area_struct *vma)
>  {
> @@ -733,6 +768,7 @@ static const struct etnaviv_gem_ops 
> etnaviv_gem_userptr_ops = {
>       .get_pages = etnaviv_gem_userptr_get_pages,
>       .release = etnaviv_gem_userptr_release,
>       .vmap = etnaviv_gem_vmap_impl,
> +     .vunmap = etnaviv_gem_userptr_vunmap,
>       .mmap = etnaviv_gem_userptr_mmap_obj,
>  };
>  
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h 
> b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> index 3f8fe19a77cc..d4965de3007c 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> @@ -65,6 +65,7 @@ struct etnaviv_gem_ops {
>       int (*get_pages)(struct etnaviv_gem_object *);
>       void (*release)(struct etnaviv_gem_object *);
>       void *(*vmap)(struct etnaviv_gem_object *);
> +     void (*vunmap)(struct etnaviv_gem_object *);
>       int (*mmap)(struct etnaviv_gem_object *, struct vm_area_struct *);
>  };
>  
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
> index 6b98200068e4..bea50d720450 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
> @@ -62,11 +62,6 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
>  
>  static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
>  {
> -     struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
> -
> -     if (etnaviv_obj->vaddr)
> -             
> dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
> -
>       /* Don't drop the pages for imported dmabuf, as they are not
>        * ours, just free the array we allocated:
>        */
> @@ -88,6 +83,13 @@ static void *etnaviv_gem_prime_vmap_impl(struct 
> etnaviv_gem_object *etnaviv_obj)
>       return map.vaddr;
>  }
>  
> +static void etnaviv_gem_prime_vunmap(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +     struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
> +
> +     dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
> +}
> +
>  static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
>               struct vm_area_struct *vma)
>  {
> @@ -106,6 +108,7 @@ static const struct etnaviv_gem_ops etnaviv_gem_prime_ops 
> = {
>       /* .get_pages should never be called */
>       .release = etnaviv_gem_prime_release,
>       .vmap = etnaviv_gem_prime_vmap_impl,
> +     .vunmap = etnaviv_gem_prime_vunmap,
>       .mmap = etnaviv_gem_prime_mmap_obj,
>  };
>  

Reply via email to