Re: [PATCH RFC 5/9] iommu/io-pgtable-arm: Hook up map_sg()

2021-01-28 Thread Will Deacon
On Wed, Jan 27, 2021 at 03:00:53PM -0500, Chuck Lever wrote:
> From: Isaac J. Manjarres 
> 
> Implement the map_sg io-pgtable op for the ARM LPAE io-pgtable
> code, so that IOMMU drivers can call it when they need to map
> a scatter-gather list.
> 
> Signed-off-by: Isaac J. Manjarres 
> Tested-by: Sai Prakash Ranjan 
> Signed-off-by: Chuck Lever 
> ---
>  drivers/iommu/io-pgtable-arm.c |   86 ++++++++++++++++++++++++++++++++++++
>  drivers/iommu/iommu.c          |   12 +++---
>  include/linux/iommu.h          |    8 ++++
>  3 files changed, 101 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
> index 87def58e79b5..0c11529442b8 100644
> --- a/drivers/iommu/io-pgtable-arm.c
> +++ b/drivers/iommu/io-pgtable-arm.c
> @@ -473,6 +473,91 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
>   return ret;
>  }
>  
> +static int arm_lpae_map_by_pgsize(struct io_pgtable_ops *ops,
> +   unsigned long iova, phys_addr_t paddr,
> +   size_t size, int iommu_prot, gfp_t gfp,
> +   size_t *mapped)
> +{
> + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
> + struct io_pgtable_cfg *cfg = &data->iop.cfg;
> + arm_lpae_iopte *ptep = data->pgd;
> + int ret, lvl = data->start_level;
> + arm_lpae_iopte prot = arm_lpae_prot_to_pte(data, iommu_prot);
> + unsigned int min_pagesz = 1 << __ffs(cfg->pgsize_bitmap);
> + long iaext = (s64)(iova + size - 1) >> cfg->ias;
> + size_t pgsize;
> +
> + if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
> + pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
> +iova, &paddr, size, min_pagesz);
> + return -EINVAL;
> + }
> +
> + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
> + iaext = ~iaext;
> + if (WARN_ON(iaext || (paddr + size - 1) >> cfg->oas))
> + return -ERANGE;
> +
> + while (size) {
> + pgsize = iommu_pgsize(cfg->pgsize_bitmap, iova | paddr, size);
> + ret = __arm_lpae_map(data, iova, paddr, pgsize, prot, lvl, ptep,
> +  gfp);
> + if (ret)
> + return ret;
> +
> + iova += pgsize;
> + paddr += pgsize;
> + *mapped += pgsize;
> + size -= pgsize;
> + }
> +
> + return 0;
> +}
> +
> +static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
> +struct scatterlist *sg, unsigned int nents,
> +int iommu_prot, gfp_t gfp, size_t *mapped)
> +{
> +
> + size_t len = 0;
> + unsigned int i = 0;
> + int ret;
> + phys_addr_t start;
> +
> + *mapped = 0;
> +
> + /* If no access, then nothing to do */
> + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
> + return 0;
> +
> + while (i <= nents) {
> + phys_addr_t s_phys = sg_phys(sg);
> +
> + if (len && s_phys != start + len) {
> + ret = arm_lpae_map_by_pgsize(ops, iova + *mapped, start,
> +  len, iommu_prot, gfp,
> +  mapped);
> +
> + if (ret)
> + return ret;
> +
> + len = 0;
> + }
> +
> + if (len) {
> + len += sg->length;
> + } else {
> + len = sg->length;
> + start = s_phys;
> + }
> +
> + if (++i < nents)
> + sg = sg_next(sg);
> + }
> +
> + return 0;
> +}

Although I really like the idea of reducing the layering here, I think we
need to figure out a way to reduce the amount of boiler-plate that ends up
in the pgtable code. Otherwise it's pretty unmaintainable.

Will
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
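For context, the new op is the interface an IOMMU driver would invoke from its own ->map_sg() path. Below is a minimal sketch of such a caller, built only from the op signature introduced in this patch; the my_smmu_domain structure and the to_my_domain() helper are hypothetical, not part of this series:

	static int my_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
				  struct scatterlist *sg, unsigned int nents,
				  int prot, gfp_t gfp, size_t *mapped)
	{
		/* pgtbl_ops as returned by alloc_io_pgtable_ops() at domain setup */
		struct io_pgtable_ops *ops = to_my_domain(domain)->pgtbl_ops;

		if (!ops->map_sg)
			return -ENODEV;

		/* One call walks the whole scatterlist; *mapped reports progress
		 * so the core can unwind a partial mapping on failure. */
		return ops->map_sg(ops, iova, sg, nents, prot, gfp, mapped);
	}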


[PATCH RFC 5/9] iommu/io-pgtable-arm: Hook up map_sg()

2021-01-27 Thread Chuck Lever
From: Isaac J. Manjarres 

Implement the map_sg io-pgtable op for the ARM LPAE io-pgtable
code, so that IOMMU drivers can call it when they need to map
a scatter-gather list.

Signed-off-by: Isaac J. Manjarres 
Tested-by: Sai Prakash Ranjan 
Signed-off-by: Chuck Lever 
---
 drivers/iommu/io-pgtable-arm.c |   86 ++++++++++++++++++++++++++++++++++++
 drivers/iommu/iommu.c          |   12 +++---
 include/linux/iommu.h          |    8 ++++
 3 files changed, 101 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 87def58e79b5..0c11529442b8 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -473,6 +473,91 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return ret;
 }
 
+static int arm_lpae_map_by_pgsize(struct io_pgtable_ops *ops,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int iommu_prot, gfp_t gfp,
+ size_t *mapped)
+{
+   struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+   struct io_pgtable_cfg *cfg = &data->iop.cfg;
+   arm_lpae_iopte *ptep = data->pgd;
+   int ret, lvl = data->start_level;
+   arm_lpae_iopte prot = arm_lpae_prot_to_pte(data, iommu_prot);
+   unsigned int min_pagesz = 1 << __ffs(cfg->pgsize_bitmap);
+   long iaext = (s64)(iova + size - 1) >> cfg->ias;
+   size_t pgsize;
+
+   if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+   pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+  iova, &paddr, size, min_pagesz);
+   return -EINVAL;
+   }
+
+   if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+   iaext = ~iaext;
+   if (WARN_ON(iaext || (paddr + size - 1) >> cfg->oas))
+   return -ERANGE;
+
+   while (size) {
+   pgsize = iommu_pgsize(cfg->pgsize_bitmap, iova | paddr, size);
+   ret = __arm_lpae_map(data, iova, paddr, pgsize, prot, lvl, ptep,
+gfp);
+   if (ret)
+   return ret;
+
+   iova += pgsize;
+   paddr += pgsize;
+   *mapped += pgsize;
+   size -= pgsize;
+   }
+
+   return 0;
+}
+
+static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+  struct scatterlist *sg, unsigned int nents,
+  int iommu_prot, gfp_t gfp, size_t *mapped)
+{
+
+   size_t len = 0;
+   unsigned int i = 0;
+   int ret;
+   phys_addr_t start;
+
+   *mapped = 0;
+
+   /* If no access, then nothing to do */
+   if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+   return 0;
+
+   while (i <= nents) {
+   phys_addr_t s_phys = sg_phys(sg);
+
+   if (len && s_phys != start + len) {
+   ret = arm_lpae_map_by_pgsize(ops, iova + *mapped, start,
+len, iommu_prot, gfp,
+mapped);
+
+   if (ret)
+   return ret;
+
+   len = 0;
+   }
+
+   if (len) {
+   len += sg->length;
+   } else {
+   len = sg->length;
+   start = s_phys;
+   }
+
+   if (++i < nents)
+   sg = sg_next(sg);
+   }
+
+   return 0;
+}
+
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *ptep)
 {
@@ -750,6 +835,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 
data->iop.ops = (struct io_pgtable_ops) {
.map= arm_lpae_map,
+   .map_sg = arm_lpae_map_sg,
.unmap  = arm_lpae_unmap,
.iova_to_phys   = arm_lpae_iova_to_phys,
};
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3d099a31ddca..ed879a4d7fac 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2346,8 +2346,8 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
-static size_t iommu_pgsize(struct iommu_domain *domain,
-  unsigned long addr_merge, size_t size)
+size_t iommu_pgsize(unsigned long pgsize_bitmap, unsigned long addr_merge,
+   size_t size)
 {
unsigned int pgsize_idx;
size_t pgsize;
@@ -2366,7 +2366,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
/* throw away page sizes not supported by the hardware */
-   pgsize &= domain->pgsize_bitmap;
+   pgsize &= pgsize_bitmap;
 
/*
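For reference, the page-size selection that iommu_pgsize() performs (and that arm_lpae_map_by_pgsize() above calls per contiguous run) can be sketched as a standalone userspace program. pick_pgsize() and the use of GCC builtins in place of the kernel's __ffs()/__fls() are illustrative assumptions, not kernel code:

	#include <stdio.h>
	#include <stddef.h>

	/* Sketch of iommu_pgsize(): pick the largest hardware-supported page
	 * that fits in 'size' and respects the alignment of addr_merge
	 * (the OR of the IOVA and the physical address). */
	static size_t pick_pgsize(unsigned long pgsize_bitmap,
				  unsigned long addr_merge, size_t size)
	{
		/* largest page that still fits into 'size' (kernel: __fls) */
		unsigned int pgsize_idx = 63 - __builtin_clzl(size);

		/* the alignment of iova|paddr limits the page size further */
		if (addr_merge) {
			unsigned int align_idx = __builtin_ctzl(addr_merge);

			if (align_idx < pgsize_idx)
				pgsize_idx = align_idx;
		}

		/* mask of all page sizes up to and including that limit... */
		unsigned long pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* ...throwing away page sizes not supported by the hardware */
		pgsize &= pgsize_bitmap;

		/* pick the biggest remaining page */
		return 1UL << (63 - __builtin_clzl(pgsize));
	}

	int main(void)
	{
		/* 4K | 2M | 1G, a typical SMMU pgsize_bitmap */
		unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

		/* 4M-aligned, 6M-long run: the first mapping uses a 2M page */
		printf("0x%zx\n", pick_pgsize(bitmap, 0x400000, 0x600000));
		return 0;
	}

Compiled with gcc, this prints 0x200000: a 4M-aligned, 6M-long run is first covered by a 2M page, which is how arm_lpae_map_by_pgsize() chops up each physically contiguous scatterlist segment.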