On Mon, Feb 09, 2026 at 10:44:43AM +0100, Thomas Hellström wrote:
> On Wed, 2026-02-04 at 20:19 -0800, Matthew Brost wrote:
> > The dma-mapping IOVA alloc, link, and sync APIs perform significantly
> > better than dma_map_page() / dma_unmap_page(), as they avoid costly
> > IOMMU synchronizations. The difference is especially noticeable when
> > mapping a 2MB region in 4KB pages.
> >
> > Use the IOVA alloc, link, and sync APIs for GPU SVM, which creates
> > DMA mappings between the CPU and GPU.
> >
> > Signed-off-by: Matthew Brost <[email protected]>
> > ---
> > v3:
> > - Always link IOVA in mixed mappings
> > - Sync IOVA
> > v4:
> > - Initialize IOVA state in get_pages
> > - Use pack IOVA linking (Jason)
> > - s/page_to_phys/hmm_pfn_to_phys (Leon)
> >
> >  drivers/gpu/drm/drm_gpusvm.c | 55 ++++++++++++++++++++++++++++++------
> >  include/drm/drm_gpusvm.h     |  5 ++++
> >  2 files changed, 52 insertions(+), 8 deletions(-)
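
For readers not familiar with these APIs, a minimal sketch of the flow the
commit message describes follows (illustrative only, not the patch code;
the helper name and error handling are made up):

#include <linux/dma-mapping.h>

/*
 * One IOVA allocation, per-page links, a single sync; teardown is one
 * unlink + free.  This is the pattern the patch below applies to GPU SVM.
 */
static int example_map_with_iova(struct device *dev,
                                 struct dma_iova_state *state,
                                 struct page **pages, unsigned long npages)
{
        size_t off = 0;
        unsigned long i;
        int err;

        /* phys = 0: a PAGE_SIZE-aligned IOVA is good enough for this sketch. */
        if (!dma_iova_try_alloc(dev, state, 0, npages * PAGE_SIZE))
                return -EOPNOTSUPP;     /* caller falls back to dma_map_page() */

        for (i = 0; i < npages; i++, off += PAGE_SIZE) {
                err = dma_iova_link(dev, state, page_to_phys(pages[i]), off,
                                    PAGE_SIZE, DMA_BIDIRECTIONAL, 0);
                if (err)
                        goto err_unlink;
        }

        /* One sync for the whole range instead of one per dma_map_page(). */
        err = dma_iova_sync(dev, state, 0, off);
        if (err)
                goto err_unlink;

        /* DMA address of page i is state->addr + i * PAGE_SIZE. */
        return 0;

err_unlink:
        dma_iova_unlink(dev, state, 0, off, DMA_BIDIRECTIONAL, 0);
        dma_iova_free(dev, state);
        return err;
}
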
> >
> > diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
> > index 4b8130a4ce95..800caaf0a783 100644
> > --- a/drivers/gpu/drm/drm_gpusvm.c
> > +++ b/drivers/gpu/drm/drm_gpusvm.c
> > @@ -1139,11 +1139,19 @@ static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
> >                  struct drm_gpusvm_pages_flags flags = {
> >                          .__flags = svm_pages->flags.__flags,
> >                  };
> > +                bool use_iova = dma_use_iova(&svm_pages->state);
> > +
> > +                if (use_iova) {
> > +                        dma_iova_unlink(dev, &svm_pages->state, 0,
> > +                                        svm_pages->state_offset,
> > +                                        svm_pages->dma_addr[0].dir, 0);
> > +                        dma_iova_free(dev, &svm_pages->state);
> > +                }
> >  
> >                  for (i = 0, j = 0; i < npages; j++) {
> >                          struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
> >  
> > -                        if (addr->proto == DRM_INTERCONNECT_SYSTEM)
> > +                        if (!use_iova && addr->proto == DRM_INTERCONNECT_SYSTEM)
> >                                  dma_unmap_page(dev,
> >                                                 addr->addr,
> >                                                 PAGE_SIZE << addr->order,
> > @@ -1408,6 +1416,7 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
> >          struct drm_gpusvm_pages_flags flags;
> >          enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE :
> >                                                              DMA_BIDIRECTIONAL;
> > +        struct dma_iova_state *state = &svm_pages->state;
> >
> > retry:
> > if (time_after(jiffies, timeout))
> > @@ -1446,6 +1455,9 @@ int drm_gpusvm_get_pages(struct drm_gpusvm
> > *gpusvm,
> > if (err)
> > goto err_free;
> >
> > + *state = (struct dma_iova_state){};
> > + svm_pages->state_offset = 0;
> > +
> > map_pages:
> > /*
> > * Perform all dma mappings under the notifier lock to not
> > @@ -1539,13 +1551,33 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
> >                                  goto err_unmap;
> >                          }
> >  
> > -                        addr = dma_map_page(gpusvm->drm->dev,
> > -                                            page, 0,
> > -                                            PAGE_SIZE << order,
> > -                                            dma_dir);
> > -                        if (dma_mapping_error(gpusvm->drm->dev, addr)) {
> > -                                err = -EFAULT;
> > -                                goto err_unmap;
> > +                        if (!i)
> > +                                dma_iova_try_alloc(gpusvm->drm->dev, state,
> > +                                                   npages * PAGE_SIZE >=
> > +                                                   HPAGE_PMD_SIZE ?
> > +                                                   HPAGE_PMD_SIZE : 0,
>
> Doc says "callers that always do PAGE_SIZE aligned transfers can always
> pass 0 here", so can be simplified?
>
* Note: @phys is only used to calculate the IOVA alignment. Callers that always
* do PAGE_SIZE aligned transfers can safely pass 0 here.

So 0 would be safe but possibly suboptimal. For mappings greater than or
equal to 2M, we'd like 2M alignment so large GPU pages can be used too.
I think passing in '0' could result in odd alignment.
I am assuming other vendors have 2M GPU pages here too, but that seems
like a somewhat safe assumption...
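
Roughly, the intended hint selection looks like this (illustrative helper,
not the patch code; it just mirrors the phys-hint choice discussed above):

static bool example_alloc_range_iova(struct device *dev,
                                     struct dma_iova_state *state,
                                     unsigned long npages)
{
        size_t size = npages * PAGE_SIZE;
        /* >= 2M region: ask for a 2M-aligned IOVA so large GPU pages stay possible. */
        phys_addr_t hint = size >= HPAGE_PMD_SIZE ? HPAGE_PMD_SIZE : 0;

        /* Returns false if the IOVA path is unavailable; caller falls back. */
        return dma_iova_try_alloc(dev, state, hint, size);
}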
Matt
>
> > +                                                   npages * PAGE_SIZE);
> > +
> > +                        if (dma_use_iova(state)) {
> > +                                err = dma_iova_link(gpusvm->drm->dev, state,
> > +                                                    hmm_pfn_to_phys(pfns[i]),
> > +                                                    svm_pages->state_offset,
> > +                                                    PAGE_SIZE << order,
> > +                                                    dma_dir, 0);
> > +                                if (err)
> > +                                        goto err_unmap;
> > +
> > +                                addr = state->addr + svm_pages->state_offset;
> > +                                svm_pages->state_offset += PAGE_SIZE << order;
> > +                        } else {
> > +                                addr = dma_map_page(gpusvm->drm->dev,
> > +                                                    page, 0,
> > +                                                    PAGE_SIZE << order,
> > +                                                    dma_dir);
> > +                                if (dma_mapping_error(gpusvm->drm->dev, addr)) {
> > +                                        err = -EFAULT;
> > +                                        goto err_unmap;
> > +                                }
> >                          }
> >  
> >                          svm_pages->dma_addr[j] = drm_pagemap_addr_encode
> > @@ -1557,6 +1589,13 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
> >                  flags.has_dma_mapping = true;
> >          }
> >  
> > +        if (dma_use_iova(state)) {
> > +                err = dma_iova_sync(gpusvm->drm->dev, state, 0,
> > +                                    svm_pages->state_offset);
> > +                if (err)
> > +                        goto err_unmap;
> > +        }
> > +
> >          if (pagemap) {
> >                  flags.has_devmem_pages = true;
> >                  drm_pagemap_get(dpagemap);
> > diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
> > index 2578ac92a8d4..cd94bb2ee6ee 100644
> > --- a/include/drm/drm_gpusvm.h
> > +++ b/include/drm/drm_gpusvm.h
> > @@ -6,6 +6,7 @@
> > #ifndef __DRM_GPUSVM_H__
> > #define __DRM_GPUSVM_H__
> >
> > +#include <linux/dma-mapping.h>
> > #include <linux/kref.h>
> > #include <linux/interval_tree.h>
> > #include <linux/mmu_notifier.h>
> > @@ -136,6 +137,8 @@ struct drm_gpusvm_pages_flags {
> >   * @dma_addr: Device address array
> >   * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
> >   *            Note this is assuming only one drm_pagemap per range is allowed.
> > + * @state: DMA IOVA state for mapping.
> > + * @state_offset: DMA IOVA offset for mapping.
> >   * @notifier_seq: Notifier sequence number of the range's pages
> >   * @flags: Flags for range
> >   * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory
> > @@ -147,6 +150,8 @@ struct drm_gpusvm_pages_flags {
> >  struct drm_gpusvm_pages {
> >          struct drm_pagemap_addr *dma_addr;
> >          struct drm_pagemap *dpagemap;
> > +        struct dma_iova_state state;
> > +        unsigned long state_offset;
> >          unsigned long notifier_seq;
> >          struct drm_gpusvm_pages_flags flags;
> >  };
>
> Otherwise LGTM.
> Reviewed-by: Thomas Hellström <[email protected]>