Re: [PATCH v2 4/5] iommu/virtio: Pass end address to viommu_add_mapping()

2021-11-29 Thread Jean-Philippe Brucker
On Sat, Nov 27, 2021 at 06:09:56PM -0500, Michael S. Tsirkin wrote:
> > > -static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned 
> > > long iova,
> > > -   phys_addr_t paddr, size_t size, u32 flags)
> > > +static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, 
> > > u64 end,
> > > +   phys_addr_t paddr, u32 flags)
> > >  {
> > >   unsigned long irqflags;
> > >   struct viommu_mapping *mapping;
> 
> I am worried that API changes like that will cause subtle
> bugs since types of arguments change but not their
> number. If we forgot to update some callers it will all be messed up.
> 
> How about passing struct Range instead?

I gave struct range a try but it looks messier overall since it would only
be used to pass arguments. I think the update is safe enough because there
is one caller for viommu_add_mapping() and two for viommu_del_mappings(),
at the moment.

Thanks,
Jean

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v2 4/5] iommu/virtio: Pass end address to viommu_add_mapping()

2021-11-27 Thread Michael S. Tsirkin
On Sat, Nov 27, 2021 at 06:09:40PM +0100, Eric Auger wrote:
> 
> 
> On 11/23/21 4:53 PM, Jean-Philippe Brucker wrote:
> > To support identity mappings, the virtio-iommu driver must be able to
> > represent full 64-bit ranges internally. Pass (start, end) instead of
> > (start, size) to viommu_add/del_mapping().
> >
> > Clean comments. The one about the returned size was never true: when
> > sweeping the whole address space the returned size will most certainly
> > be smaller than 2^64.
> >
> > Reviewed-by: Kevin Tian 
> > Signed-off-by: Jean-Philippe Brucker 
> Reviewed-by: Eric Auger 
> 
> Eric
> 
> > ---
> >  drivers/iommu/virtio-iommu.c | 31 +++
> >  1 file changed, 15 insertions(+), 16 deletions(-)
> >
> > diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> > index d63ec4d11b00..eceb9281c8c1 100644
> > --- a/drivers/iommu/virtio-iommu.c
> > +++ b/drivers/iommu/virtio-iommu.c
> > @@ -311,8 +311,8 @@ static int viommu_send_req_sync(struct viommu_dev 
> > *viommu, void *buf,
> >   *
> >   * On success, return the new mapping. Otherwise return NULL.
> >   */
> > -static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long 
> > iova,
> > - phys_addr_t paddr, size_t size, u32 flags)
> > +static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 
> > end,
> > + phys_addr_t paddr, u32 flags)
> >  {
> > unsigned long irqflags;
> > struct viommu_mapping *mapping;

I am worried that API changes like that will cause subtle
bugs since types of arguments change but not their
number. If we forgot to update some callers it will all be messed up.

How about passing struct Range instead?

> > @@ -323,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain 
> > *vdomain, unsigned long iova,
> >  
> > mapping->paddr  = paddr;
> > mapping->iova.start = iova;
> > -   mapping->iova.last  = iova + size - 1;
> > +   mapping->iova.last  = end;
> > mapping->flags  = flags;
> >  
> > spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
> > @@ -338,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain 
> > *vdomain, unsigned long iova,
> >   *
> >   * @vdomain: the domain
> >   * @iova: start of the range
> > - * @size: size of the range. A size of 0 corresponds to the entire address
> > - * space.
> > + * @end: end of the range
> >   *
> > - * On success, returns the number of unmapped bytes (>= size)
> > + * On success, returns the number of unmapped bytes
> >   */
> >  static size_t viommu_del_mappings(struct viommu_domain *vdomain,
> > - unsigned long iova, size_t size)
> > + u64 iova, u64 end)
> >  {
> > size_t unmapped = 0;
> > unsigned long flags;
> > -   unsigned long last = iova + size - 1;
> > struct viommu_mapping *mapping = NULL;
> > struct interval_tree_node *node, *next;
> >  
> > spin_lock_irqsave(&vdomain->mappings_lock, flags);
> > -   next = interval_tree_iter_first(&vdomain->mappings, iova, last);
> > +   next = interval_tree_iter_first(&vdomain->mappings, iova, end);
> > while (next) {
> > node = next;
> > mapping = container_of(node, struct viommu_mapping, iova);
> > -   next = interval_tree_iter_next(node, iova, last);
> > +   next = interval_tree_iter_next(node, iova, end);
> >  
> > /* Trying to split a mapping? */
> > if (mapping->iova.start < iova)
> > @@ -656,8 +654,8 @@ static void viommu_domain_free(struct iommu_domain 
> > *domain)
> >  {
> > struct viommu_domain *vdomain = to_viommu_domain(domain);
> >  
> > -   /* Free all remaining mappings (size 2^64) */
> > -   viommu_del_mappings(vdomain, 0, 0);
> > +   /* Free all remaining mappings */
> > +   viommu_del_mappings(vdomain, 0, ULLONG_MAX);
> >  
> > if (vdomain->viommu)
> > ida_free(&vdomain->viommu->domain_ids, vdomain->id);
> > @@ -742,6 +740,7 @@ static int viommu_map(struct iommu_domain *domain, 
> > unsigned long iova,
> >  {
> > int ret;
> > u32 flags;
> > +   u64 end = iova + size - 1;
> > struct virtio_iommu_req_map map;
> > struct viommu_domain *vdomain = to_viommu_domain(domain);
> >  
> > @@ -752,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, 
> > unsigned long iova,
> > if (flags & ~vdomain->map_flags)
> > return -EINVAL;
> >  
> > -   ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
> > +   ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
> > if (ret)
> > return ret;
> >  
> > @@ -761,7 +760,7 @@ static int viommu_map(struct iommu_domain *domain, 
> > unsigned long iova,
> > .domain = cpu_to_le32(vdomain->id),
> > .virt_start = cpu_to_le64(iova),
> > .phys_start = cpu_to_le64(paddr),
> > -   .virt_end   = cpu_to_le64(iova + size - 1),
> > +   .virt_end   = cpu_to_le64(end),

Re: [PATCH v2 4/5] iommu/virtio: Pass end address to viommu_add_mapping()

2021-11-27 Thread Eric Auger



On 11/23/21 4:53 PM, Jean-Philippe Brucker wrote:
> To support identity mappings, the virtio-iommu driver must be able to
> represent full 64-bit ranges internally. Pass (start, end) instead of
> (start, size) to viommu_add/del_mapping().
>
> Clean comments. The one about the returned size was never true: when
> sweeping the whole address space the returned size will most certainly
> be smaller than 2^64.
>
> Reviewed-by: Kevin Tian 
> Signed-off-by: Jean-Philippe Brucker 
Reviewed-by: Eric Auger 

Eric

> ---
>  drivers/iommu/virtio-iommu.c | 31 +++
>  1 file changed, 15 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index d63ec4d11b00..eceb9281c8c1 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -311,8 +311,8 @@ static int viommu_send_req_sync(struct viommu_dev 
> *viommu, void *buf,
>   *
>   * On success, return the new mapping. Otherwise return NULL.
>   */
> -static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long 
> iova,
> -   phys_addr_t paddr, size_t size, u32 flags)
> +static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 
> end,
> +   phys_addr_t paddr, u32 flags)
>  {
>   unsigned long irqflags;
>   struct viommu_mapping *mapping;
> @@ -323,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain 
> *vdomain, unsigned long iova,
>  
>   mapping->paddr  = paddr;
>   mapping->iova.start = iova;
> - mapping->iova.last  = iova + size - 1;
> + mapping->iova.last  = end;
>   mapping->flags  = flags;
>  
>   spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
> @@ -338,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain 
> *vdomain, unsigned long iova,
>   *
>   * @vdomain: the domain
>   * @iova: start of the range
> - * @size: size of the range. A size of 0 corresponds to the entire address
> - *   space.
> + * @end: end of the range
>   *
> - * On success, returns the number of unmapped bytes (>= size)
> + * On success, returns the number of unmapped bytes
>   */
>  static size_t viommu_del_mappings(struct viommu_domain *vdomain,
> -   unsigned long iova, size_t size)
> +   u64 iova, u64 end)
>  {
>   size_t unmapped = 0;
>   unsigned long flags;
> - unsigned long last = iova + size - 1;
>   struct viommu_mapping *mapping = NULL;
>   struct interval_tree_node *node, *next;
>  
>   spin_lock_irqsave(&vdomain->mappings_lock, flags);
> - next = interval_tree_iter_first(&vdomain->mappings, iova, last);
> + next = interval_tree_iter_first(&vdomain->mappings, iova, end);
>   while (next) {
>   node = next;
>   mapping = container_of(node, struct viommu_mapping, iova);
> - next = interval_tree_iter_next(node, iova, last);
> + next = interval_tree_iter_next(node, iova, end);
>  
>   /* Trying to split a mapping? */
>   if (mapping->iova.start < iova)
> @@ -656,8 +654,8 @@ static void viommu_domain_free(struct iommu_domain 
> *domain)
>  {
>   struct viommu_domain *vdomain = to_viommu_domain(domain);
>  
> - /* Free all remaining mappings (size 2^64) */
> - viommu_del_mappings(vdomain, 0, 0);
> + /* Free all remaining mappings */
> + viommu_del_mappings(vdomain, 0, ULLONG_MAX);
>  
>   if (vdomain->viommu)
>   ida_free(&vdomain->viommu->domain_ids, vdomain->id);
> @@ -742,6 +740,7 @@ static int viommu_map(struct iommu_domain *domain, 
> unsigned long iova,
>  {
>   int ret;
>   u32 flags;
> + u64 end = iova + size - 1;
>   struct virtio_iommu_req_map map;
>   struct viommu_domain *vdomain = to_viommu_domain(domain);
>  
> @@ -752,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, 
> unsigned long iova,
>   if (flags & ~vdomain->map_flags)
>   return -EINVAL;
>  
> - ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
> + ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
>   if (ret)
>   return ret;
>  
> @@ -761,7 +760,7 @@ static int viommu_map(struct iommu_domain *domain, 
> unsigned long iova,
>   .domain = cpu_to_le32(vdomain->id),
>   .virt_start = cpu_to_le64(iova),
>   .phys_start = cpu_to_le64(paddr),
> - .virt_end   = cpu_to_le64(iova + size - 1),
> + .virt_end   = cpu_to_le64(end),
>   .flags  = cpu_to_le32(flags),
>   };
>  
> @@ -770,7 +769,7 @@ static int viommu_map(struct iommu_domain *domain, 
> unsigned long iova,
>  
>   ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
>   if (ret)
> - viommu_del_mappings(vdomain, iova, size);
> + viommu_del_mappings(vdomain, iova, end);
>  
>   return ret;
>  }
> @@ 

[PATCH v2 4/5] iommu/virtio: Pass end address to viommu_add_mapping()

2021-11-23 Thread Jean-Philippe Brucker
To support identity mappings, the virtio-iommu driver must be able to
represent full 64-bit ranges internally. Pass (start, end) instead of
(start, size) to viommu_add/del_mapping().

Clean comments. The one about the returned size was never true: when
sweeping the whole address space the returned size will most certainly
be smaller than 2^64.

Reviewed-by: Kevin Tian 
Signed-off-by: Jean-Philippe Brucker 
---
 drivers/iommu/virtio-iommu.c | 31 +++
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index d63ec4d11b00..eceb9281c8c1 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -311,8 +311,8 @@ static int viommu_send_req_sync(struct viommu_dev *viommu, 
void *buf,
  *
  * On success, return the new mapping. Otherwise return NULL.
  */
-static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long 
iova,
- phys_addr_t paddr, size_t size, u32 flags)
+static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
+ phys_addr_t paddr, u32 flags)
 {
unsigned long irqflags;
struct viommu_mapping *mapping;
@@ -323,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain 
*vdomain, unsigned long iova,
 
mapping->paddr  = paddr;
mapping->iova.start = iova;
-   mapping->iova.last  = iova + size - 1;
+   mapping->iova.last  = end;
mapping->flags  = flags;
 
spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
@@ -338,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain 
*vdomain, unsigned long iova,
  *
  * @vdomain: the domain
  * @iova: start of the range
- * @size: size of the range. A size of 0 corresponds to the entire address
- * space.
+ * @end: end of the range
  *
- * On success, returns the number of unmapped bytes (>= size)
+ * On success, returns the number of unmapped bytes
  */
 static size_t viommu_del_mappings(struct viommu_domain *vdomain,
- unsigned long iova, size_t size)
+ u64 iova, u64 end)
 {
size_t unmapped = 0;
unsigned long flags;
-   unsigned long last = iova + size - 1;
struct viommu_mapping *mapping = NULL;
struct interval_tree_node *node, *next;
 
spin_lock_irqsave(&vdomain->mappings_lock, flags);
-   next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+   next = interval_tree_iter_first(&vdomain->mappings, iova, end);
while (next) {
node = next;
mapping = container_of(node, struct viommu_mapping, iova);
-   next = interval_tree_iter_next(node, iova, last);
+   next = interval_tree_iter_next(node, iova, end);
 
/* Trying to split a mapping? */
if (mapping->iova.start < iova)
@@ -656,8 +654,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
 {
struct viommu_domain *vdomain = to_viommu_domain(domain);
 
-   /* Free all remaining mappings (size 2^64) */
-   viommu_del_mappings(vdomain, 0, 0);
+   /* Free all remaining mappings */
+   viommu_del_mappings(vdomain, 0, ULLONG_MAX);
 
if (vdomain->viommu)
ida_free(&vdomain->viommu->domain_ids, vdomain->id);
@@ -742,6 +740,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned 
long iova,
 {
int ret;
u32 flags;
+   u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
 
@@ -752,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned 
long iova,
if (flags & ~vdomain->map_flags)
return -EINVAL;
 
-   ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+   ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
if (ret)
return ret;
 
@@ -761,7 +760,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned 
long iova,
.domain = cpu_to_le32(vdomain->id),
.virt_start = cpu_to_le64(iova),
.phys_start = cpu_to_le64(paddr),
-   .virt_end   = cpu_to_le64(iova + size - 1),
+   .virt_end   = cpu_to_le64(end),
.flags  = cpu_to_le32(flags),
};
 
@@ -770,7 +769,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned 
long iova,
 
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
-   viommu_del_mappings(vdomain, iova, size);
+   viommu_del_mappings(vdomain, iova, end);
 
return ret;
 }
@@ -783,7 +782,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, 
unsigned long iova,
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
 
-