Re: [PATCH v2 5/5] iommu/virtio: Support identity-mapped domains
On Sat, Nov 27, 2021 at 06:09:56PM +0100, Eric Auger wrote: > > - vdomain->viommu = 0; > > + vdomain->viommu = NULL; > nit: that change could have been done in patch 2 Ah yes, I changed that in v2 but fixed up the wrong patch > > return -EOPNOTSUPP; > > } > > - > > - vdomain->bypass = true; > > } > > > > return 0; > Besides > Reviewed-by: Eric Auger Thanks! Jean ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [PATCH v2 5/5] iommu/virtio: Support identity-mapped domains
Hi Jean, On 11/23/21 4:53 PM, Jean-Philippe Brucker wrote: > Support identity domains for devices that do not offer the > VIRTIO_IOMMU_F_BYPASS_CONFIG feature, by creating 1:1 mappings between > the virtual and physical address space. Identity domains created this > way still perform noticeably better than DMA domains, because they don't > have the overhead of setting up and tearing down mappings at runtime. > The performance difference between this and bypass is minimal in > comparison. > > It does not matter that the physical addresses in the identity mappings > do not all correspond to memory. By enabling passthrough we are trusting > the device driver and the device itself to only perform DMA to suitable > locations. In some cases it may even be desirable to perform DMA to MMIO > regions. > > Reviewed-by: Kevin Tian > Signed-off-by: Jean-Philippe Brucker > --- > drivers/iommu/virtio-iommu.c | 63 +--- > 1 file changed, 58 insertions(+), 5 deletions(-) > > diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c > index eceb9281c8c1..6a8a52b4297b 100644 > --- a/drivers/iommu/virtio-iommu.c > +++ b/drivers/iommu/virtio-iommu.c > @@ -375,6 +375,55 @@ static size_t viommu_del_mappings(struct viommu_domain > *vdomain, > return unmapped; > } > > +/* > + * Fill the domain with identity mappings, skipping the device's reserved > + * regions. 
> + */ > +static int viommu_domain_map_identity(struct viommu_endpoint *vdev, + struct viommu_domain *vdomain) > +{ > + int ret; > + struct iommu_resv_region *resv; > + u64 iova = vdomain->domain.geometry.aperture_start; > + u64 limit = vdomain->domain.geometry.aperture_end; > + u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; > + unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap); > + > + iova = ALIGN(iova, granule); > + limit = ALIGN_DOWN(limit + 1, granule) - 1; > + > + list_for_each_entry(resv, &vdev->resv_regions, list) { > + u64 resv_start = ALIGN_DOWN(resv->start, granule); > + u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1; > + > + if (resv_end < iova || resv_start > limit) > + /* No overlap */ > + continue; > + > + if (resv_start > iova) { > + ret = viommu_add_mapping(vdomain, iova, resv_start - 1, > + (phys_addr_t)iova, flags); > + if (ret) > + goto err_unmap; > + } > + > + if (resv_end >= limit) > + return 0; > + > + iova = resv_end + 1; > + } > + > + ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova, > + flags); > + if (ret) > + goto err_unmap; > + return 0; > + > +err_unmap: > + viommu_del_mappings(vdomain, 0, iova); > + return ret; > +} > + > /* > * viommu_replay_mappings - re-send MAP requests > * > @@ -637,14 +686,18 @@ static int viommu_domain_finalise(struct > viommu_endpoint *vdev, > vdomain->viommu = viommu; > > if (domain->type == IOMMU_DOMAIN_IDENTITY) { > - if (!virtio_has_feature(viommu->vdev, > - VIRTIO_IOMMU_F_BYPASS_CONFIG)) { > + if (virtio_has_feature(viommu->vdev, > +VIRTIO_IOMMU_F_BYPASS_CONFIG)) { > + vdomain->bypass = true; > + return 0; > + } > + > + ret = viommu_domain_map_identity(vdev, vdomain); > + if (ret) { > ida_free(&viommu->domain_ids, vdomain->id); > - vdomain->viommu = 0; > + vdomain->viommu = NULL; nit: that change could have been done in patch 2 > return -EOPNOTSUPP; > } > - > - vdomain->bypass = true; > } > > return 0; Besides Reviewed-by: Eric Auger Eric ___ iommu 
mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
[PATCH v2 5/5] iommu/virtio: Support identity-mapped domains
Support identity domains for devices that do not offer the VIRTIO_IOMMU_F_BYPASS_CONFIG feature, by creating 1:1 mappings between the virtual and physical address space. Identity domains created this way still perform noticeably better than DMA domains, because they don't have the overhead of setting up and tearing down mappings at runtime. The performance difference between this and bypass is minimal in comparison. It does not matter that the physical addresses in the identity mappings do not all correspond to memory. By enabling passthrough we are trusting the device driver and the device itself to only perform DMA to suitable locations. In some cases it may even be desirable to perform DMA to MMIO regions. Reviewed-by: Kevin Tian Signed-off-by: Jean-Philippe Brucker --- drivers/iommu/virtio-iommu.c | 63 +--- 1 file changed, 58 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index eceb9281c8c1..6a8a52b4297b 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -375,6 +375,55 @@ static size_t viommu_del_mappings(struct viommu_domain *vdomain, return unmapped; } +/* + * Fill the domain with identity mappings, skipping the device's reserved + * regions. 
+ */ +static int viommu_domain_map_identity(struct viommu_endpoint *vdev, + struct viommu_domain *vdomain) +{ + int ret; + struct iommu_resv_region *resv; + u64 iova = vdomain->domain.geometry.aperture_start; + u64 limit = vdomain->domain.geometry.aperture_end; + u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; + unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap); + + iova = ALIGN(iova, granule); + limit = ALIGN_DOWN(limit + 1, granule) - 1; + + list_for_each_entry(resv, &vdev->resv_regions, list) { + u64 resv_start = ALIGN_DOWN(resv->start, granule); + u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1; + + if (resv_end < iova || resv_start > limit) + /* No overlap */ + continue; + + if (resv_start > iova) { + ret = viommu_add_mapping(vdomain, iova, resv_start - 1, +(phys_addr_t)iova, flags); + if (ret) + goto err_unmap; + } + + if (resv_end >= limit) + return 0; + + iova = resv_end + 1; + } + + ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova, +flags); + if (ret) + goto err_unmap; + return 0; + +err_unmap: + viommu_del_mappings(vdomain, 0, iova); + return ret; +} + /* * viommu_replay_mappings - re-send MAP requests * @@ -637,14 +686,18 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev, vdomain->viommu = viommu; if (domain->type == IOMMU_DOMAIN_IDENTITY) { - if (!virtio_has_feature(viommu->vdev, - VIRTIO_IOMMU_F_BYPASS_CONFIG)) { + if (virtio_has_feature(viommu->vdev, + VIRTIO_IOMMU_F_BYPASS_CONFIG)) { + vdomain->bypass = true; + return 0; + } + + ret = viommu_domain_map_identity(vdev, vdomain); + if (ret) { ida_free(&viommu->domain_ids, vdomain->id); - vdomain->viommu = 0; + vdomain->viommu = NULL; return -EOPNOTSUPP; } - - vdomain->bypass = true; } return 0; -- 2.33.1 ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu