On Wed, Apr 22, 2020 at 05:25:17PM -0400, Michael J. Ruhl wrote:
> LMEM-backed buffer objects do not have struct page information.
> Instead, the dma_address of the scatterlist entry is used to store
> the LMEM address (relative to the device; this is not the CPU
> physical address).
> 
> The dma-buf map handler requires struct pages in order to do a
> dma_map_xx() call.
> 
> Add new mapping/unmapping functions that use the LMEM dma_address so
> that LMEM-backed buffer objects can be mapped.
> 
> Before mapping, check the peer-to-peer distance to verify that P2P
> DMA can occur.

So this is supposed to check the importer's allow_peer2peer flag, and that
flag in turn is only supposed to be set by importers that implement
->move_notify, which requires a pile of locking changes to align with
dma_resv.
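
On the importer side that roughly means wiring up the dynamic attach ops,
something like the sketch below (the allow_peer2peer/move_notify names are
from the series on the list; the invalidate helper is made up):

static void i915_importer_move_notify(struct dma_buf_attachment *attach)
{
        /*
         * The exporter calls this with attach->dmabuf->resv held; the
         * importer has to drop its cached mapping here and rebuild it on
         * next use (hypothetical helper, not in the tree).
         */
        i915_importer_invalidate_mapping(attach->importer_priv);
}

static const struct dma_buf_attach_ops i915_importer_attach_ops = {
        .allow_peer2peer = true,
        .move_notify = i915_importer_move_notify,
};

The whole point of requiring move_notify is that a p2p mapping points at
VRAM the exporter may want to move, so the importer has to be able to
invalidate and re-map under the dma_resv lock.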

By not doing all that you avoid the lockdep splats, but you're also
breaking the peer2peer dma-buf contract big time :-)
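
The bare minimum on the exporter side would be to refuse the mapping when
the importer hasn't opted in, e.g. with a helper along these lines (again
just a sketch, assuming the attach->peer2peer bookkeeping from that series;
the helper name is made up):

static bool i915_dmabuf_can_map(struct dma_buf_attachment *attach)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);

        /*
         * Objects with struct pages can go through the normal dma_map_sg()
         * path.  LMEM-only objects can only be shared over PCIe p2p, so
         * the importer must have set allow_peer2peer, which the dma-buf
         * core mirrors into attach->peer2peer.
         */
        return i915_gem_object_has_struct_page(obj) || attach->peer2peer;
}

i915_gem_map_dma_buf() would then bail out with ERR_PTR(-EOPNOTSUPP) before
building the sg_table when this returns false.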

I think this needs more work, or I need better glasses in case I'm not
spotting where this is all done.
-Daniel

> 
> Signed-off-by: Michael J. Ruhl <michael.j.r...@intel.com>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 82 ++++++++++++++++++++--
>  1 file changed, 76 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> index 7ea4abb6a896..402c989cc23d 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> @@ -7,6 +7,7 @@
>  #include <linux/dma-buf.h>
>  #include <linux/highmem.h>
>  #include <linux/dma-resv.h>
> +#include <linux/pci-p2pdma.h>
>  #include <linux/scatterlist.h>
>  
>  #include "i915_drv.h"
> @@ -18,6 +19,67 @@ static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
>       return to_intel_bo(buf->priv);
>  }
>  
> +static void dmabuf_unmap_addr(struct device *dev, struct scatterlist *sgl,
> +                           int nents, enum dma_data_direction dir)
> +{
> +     struct scatterlist *sg;
> +     int i;
> +
> +     for_each_sg(sgl, sg, nents, i)
> +             dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
> +                                dir, 0);
> +}
> +
> +/**
> + * dmabuf_map_addr - Update LMEM address to a physical address and map the
> + * resource.
> + * @dev: valid device
> + * @obj: valid i915 GEM object
> + * @sgl: scatter list to apply the mapping to
> + * @nents: number of entries in the scatter list
> + * @dir: DMA direction
> + *
> + * The dma_address of the scatter list is the LMEM "address".  From this the
> + * actual physical address can be determined.
> + *
> + * Return: number of entries mapped, or 0 on error.
> + *
> + */
> +static int dmabuf_map_addr(struct device *dev, struct drm_i915_gem_object *obj,
> +                        struct scatterlist *sgl, int nents,
> +                        enum dma_data_direction dir)
> +{
> +     struct scatterlist *sg;
> +     phys_addr_t addr;
> +     int distance;
> +     int i;
> +
> +     distance = pci_p2pdma_distance_many(obj->base.dev->pdev, &dev, 1,
> +                                         true);
> +     if (distance < 0) {
> +             pr_info("%s: from: %s  to: %s  distance: %d\n", __func__,
> +                     pci_name(obj->base.dev->pdev), dev_name(dev),
> +                     distance);
> +             return 0;
> +     }
> +
> +     for_each_sg(sgl, sg, nents, i) {
> +             addr = sg_dma_address(sg) + obj->mm.region->io_start;
> +
> +             sg->dma_address = dma_map_resource(dev, addr, sg->length, dir,
> +                                                0);
> +             if (dma_mapping_error(dev, sg->dma_address))
> +                     goto unmap;
> +             sg->dma_length = sg->length;
> +     }
> +
> +     return nents;
> +
> +unmap:
> +     dmabuf_unmap_addr(dev, sgl, i, dir);
> +     return 0;
> +}
> +
>  static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
>                                            enum dma_data_direction dir)
>  {
> @@ -44,12 +106,17 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
>       dst = sgt->sgl;
>       for_each_sg(obj->mm.pages->sgl, src, obj->mm.pages->nents, i) {
>               sg_set_page(dst, sg_page(src), src->length, 0);
> +             sg_dma_address(dst) = sg_dma_address(src);
>               dst = sg_next(dst);
>       }
>  
> -     if (!dma_map_sg_attrs(attach->dev,
> -                           sgt->sgl, sgt->nents, dir,
> -                           DMA_ATTR_SKIP_CPU_SYNC)) {
> +     if (i915_gem_object_has_struct_page(obj))
> +             ret = dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
> +                                    DMA_ATTR_SKIP_CPU_SYNC);
> +     else
> +             ret = dmabuf_map_addr(attach->dev, obj, sgt->sgl, sgt->nents,
> +                                   dir);
> +     if (!ret) {
>               ret = -ENOMEM;
>               goto err_free_sg;
>       }
> @@ -72,9 +139,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
>  {
>       struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
>  
> -     dma_unmap_sg_attrs(attach->dev,
> -                        sgt->sgl, sgt->nents, dir,
> -                        DMA_ATTR_SKIP_CPU_SYNC);
> +     if (i915_gem_object_has_struct_page(obj))
> +             dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
> +                                DMA_ATTR_SKIP_CPU_SYNC);
> +     else
> +             dmabuf_unmap_addr(attach->dev, sgt->sgl, sgt->nents, dir);
> +
>       sg_free_table(sgt);
>       kfree(sgt);
>  
> -- 
> 2.21.0
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch