On Tue, 26 Jun 2018 15:59:25 +1000
Alexey Kardashevskiy wrote:
> The size is always equal to 1 page so let's use this. Later on this will
> be used for other checks which use page shifts to check the granularity
> of access.
>
> This should cause no behavioral change.
>
> Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> ---
> drivers/vfio/vfio_iommu_spapr_tce.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
I assume a v3+ will go in through the ppc tree since the bulk of the
series is there. For this,
Acked-by: Alex Williamson <alex.williamson@redhat.com>
> diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c
> b/drivers/vfio/vfio_iommu_spapr_tce.c
> index 759a5bd..2da5f05 100644
> --- a/drivers/vfio/vfio_iommu_spapr_tce.c
> +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
> @@ -457,13 +457,13 @@ static void tce_iommu_unuse_page(struct tce_container
> *container,
> }
>
> static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
> - unsigned long tce, unsigned long size,
> + unsigned long tce, unsigned long shift,
> unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
> {
> long ret = 0;
> struct mm_iommu_table_group_mem_t *mem;
>
> - mem = mm_iommu_lookup(container->mm, tce, size);
> + mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
> if (!mem)
> return -EINVAL;
>
> @@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container
> *container,
> if (!pua)
> return;
>
> - ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
> + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
> &hpa, &mem);
> if (ret)
> pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
> @@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container
> *container,
> entry + i);
>
> ret = tce_iommu_prereg_ua_to_hpa(container,
> - tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
> + tce, tbl->it_page_shift, &hpa, &mem);
> if (ret)
> break;
>